From cdd5a197eed0b199f2afdb7b6dc64a73c6bdc313 Mon Sep 17 00:00:00 2001 From: Bri <34875062+Monkatraz@users.noreply.github.com> Date: Thu, 19 Feb 2026 09:13:01 +0000 Subject: [PATCH 01/29] feat: add Python client implementation for River protocol v2.0 Clean room implementation of a River client in Python, referencing the TypeScript client and PROTOCOL.md. Includes full support for all four procedure types (rpc, stream, upload, subscription), transparent reconnection, heartbeat echo, and seq/ack bookkeeping. 40 tests passing against the TypeScript server. --- python-client/.gitignore | 7 + python-client/pyproject.toml | 28 ++ python-client/river/__init__.py | 19 + python-client/river/client.py | 468 +++++++++++++++++++ python-client/river/codec.py | 110 +++++ python-client/river/session.py | 351 ++++++++++++++ python-client/river/streams.py | 153 ++++++ python-client/river/transport.py | 568 +++++++++++++++++++++++ python-client/river/types.py | 259 +++++++++++ python-client/tests/__init__.py | 0 python-client/tests/conftest.py | 85 ++++ python-client/tests/test_e2e.py | 695 ++++++++++++++++++++++++++++ python-client/tests/test_server.mjs | 291 ++++++++++++ 13 files changed, 3034 insertions(+) create mode 100644 python-client/.gitignore create mode 100644 python-client/pyproject.toml create mode 100644 python-client/river/__init__.py create mode 100644 python-client/river/client.py create mode 100644 python-client/river/codec.py create mode 100644 python-client/river/session.py create mode 100644 python-client/river/streams.py create mode 100644 python-client/river/transport.py create mode 100644 python-client/river/types.py create mode 100644 python-client/tests/__init__.py create mode 100644 python-client/tests/conftest.py create mode 100644 python-client/tests/test_e2e.py create mode 100644 python-client/tests/test_server.mjs diff --git a/python-client/.gitignore b/python-client/.gitignore new file mode 100644 index 00000000..cba61eb6 --- /dev/null +++ 
b/python-client/.gitignore @@ -0,0 +1,7 @@ +.venv/ +__pycache__/ +*.pyc +*.egg-info/ +.pytest_cache/ +dist/ +build/ diff --git a/python-client/pyproject.toml b/python-client/pyproject.toml new file mode 100644 index 00000000..9d681b81 --- /dev/null +++ b/python-client/pyproject.toml @@ -0,0 +1,28 @@ +[build-system] +requires = ["setuptools>=68.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "river-client" +version = "0.1.0" +description = "Python client for River protocol v2.0" +readme = "README.md" +requires-python = ">=3.10" +license = {text = "MIT"} +dependencies = [ + "websockets>=12.0", + "msgpack>=1.0", +] + +[project.optional-dependencies] +dev = [ + "pytest>=8.0", + "pytest-asyncio>=0.23", +] + +[tool.pytest.ini_options] +asyncio_mode = "auto" +testpaths = ["tests"] + +[tool.setuptools.packages.find] +include = ["river*"] diff --git a/python-client/river/__init__.py b/python-client/river/__init__.py new file mode 100644 index 00000000..01b30d92 --- /dev/null +++ b/python-client/river/__init__.py @@ -0,0 +1,19 @@ +"""River protocol v2.0 Python client implementation.""" + +from river.types import TransportMessage, Ok, Err +from river.codec import NaiveJsonCodec, BinaryCodec +from river.transport import WebSocketClientTransport +from river.client import RiverClient +from river.streams import Readable, Writable + +__all__ = [ + "RiverClient", + "WebSocketClientTransport", + "NaiveJsonCodec", + "BinaryCodec", + "TransportMessage", + "Ok", + "Err", + "Readable", + "Writable", +] diff --git a/python-client/river/client.py b/python-client/river/client.py new file mode 100644 index 00000000..eb6210d3 --- /dev/null +++ b/python-client/river/client.py @@ -0,0 +1,468 @@ +"""River client for invoking remote procedures. + +Provides the high-level API for calling rpc, stream, upload, and +subscription procedures on a River server. 
+""" + +from __future__ import annotations + +import asyncio +import logging +from dataclasses import dataclass +from typing import Any, Callable + +from river.streams import Readable, Writable +from river.transport import WebSocketClientTransport +from river.types import ( + ControlFlags, + PartialTransportMessage, + TransportMessage, + cancel_message, + close_stream_message, + err_result, + generate_id, + is_ack, + is_stream_cancel, + is_stream_close, + CANCEL_CODE, + UNEXPECTED_DISCONNECT_CODE, +) + +logger = logging.getLogger(__name__) + + +@dataclass +class RpcResult: + """Result of an RPC call.""" + + ok: bool + payload: Any + + +@dataclass +class StreamResult: + """Result of opening a stream procedure.""" + + req_writable: Writable + res_readable: Readable + + +@dataclass +class UploadResult: + """Result of opening an upload procedure.""" + + req_writable: Writable + finalize: Callable[[], Any] # async callable returning RpcResult + + +@dataclass +class SubscriptionResult: + """Result of opening a subscription procedure.""" + + res_readable: Readable + + +class RiverClient: + """Client for invoking procedures on a River server. + + Usage: + transport = WebSocketClientTransport("ws://localhost:8080", ...) 
+ client = RiverClient(transport, server_id="my-server") + + # RPC + result = await client.rpc("service", "procedure", {"arg": 1}) + + # Stream + stream = client.stream("service", "procedure", {"arg": 1}) + stream.req_writable.write({"data": "hello"}) + async for msg in stream.res_readable: + print(msg) + + # Upload + upload = client.upload("service", "procedure", {"arg": 1}) + upload.req_writable.write({"data": "chunk1"}) + upload.req_writable.close() + result = await upload.finalize() + + # Subscription + sub = client.subscribe("service", "procedure", {"arg": 1}) + async for msg in sub.res_readable: + print(msg) + """ + + def __init__( + self, + transport: WebSocketClientTransport, + server_id: str | None = None, + connect_on_invoke: bool = True, + eagerly_connect: bool = False, + ) -> None: + self._transport = transport + self._server_id = server_id or transport.server_id + self._connect_on_invoke = connect_on_invoke + + if eagerly_connect: + transport.connect(self._server_id) + + @property + def transport(self) -> WebSocketClientTransport: + return self._transport + + async def rpc( + self, + service_name: str, + procedure_name: str, + init: Any, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + """Invoke an RPC procedure. + + Returns the result dict: {"ok": True/False, "payload": ...} + """ + result = self._handle_proc( + proc_type="rpc", + service_name=service_name, + procedure_name=procedure_name, + init=init, + abort_signal=abort_signal, + ) + # For RPC, we await the single response + readable = result["res_readable"] + done, value = await readable.next() + if done: + return err_result( + UNEXPECTED_DISCONNECT_CODE, "No response received" + ) + return value + + def stream( + self, + service_name: str, + procedure_name: str, + init: Any, + abort_signal: asyncio.Event | None = None, + ) -> StreamResult: + """Open a stream procedure. + + Returns StreamResult with req_writable and res_readable. 
+ """ + result = self._handle_proc( + proc_type="stream", + service_name=service_name, + procedure_name=procedure_name, + init=init, + abort_signal=abort_signal, + ) + return StreamResult( + req_writable=result["req_writable"], + res_readable=result["res_readable"], + ) + + def upload( + self, + service_name: str, + procedure_name: str, + init: Any, + abort_signal: asyncio.Event | None = None, + ) -> UploadResult: + """Open an upload procedure. + + Returns UploadResult with req_writable and finalize(). + """ + result = self._handle_proc( + proc_type="upload", + service_name=service_name, + procedure_name=procedure_name, + init=init, + abort_signal=abort_signal, + ) + + async def finalize() -> dict[str, Any]: + readable = result["res_readable"] + done, value = await readable.next() + if done: + return err_result( + UNEXPECTED_DISCONNECT_CODE, "No response received" + ) + return value + + return UploadResult( + req_writable=result["req_writable"], + finalize=finalize, + ) + + def subscribe( + self, + service_name: str, + procedure_name: str, + init: Any, + abort_signal: asyncio.Event | None = None, + ) -> SubscriptionResult: + """Open a subscription procedure. + + Returns SubscriptionResult with res_readable. + """ + result = self._handle_proc( + proc_type="subscription", + service_name=service_name, + procedure_name=procedure_name, + init=init, + abort_signal=abort_signal, + ) + return SubscriptionResult(res_readable=result["res_readable"]) + + def _handle_proc( + self, + proc_type: str, + service_name: str, + procedure_name: str, + init: Any, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + """Core procedure dispatch logic. + + Sets up the stream, registers message handlers, sends the init message. 
+ """ + to = self._server_id + transport = self._transport + + # If transport is closed, return immediate disconnect error + if transport.get_status() != "open": + res_readable = Readable() + res_readable._push_value( + err_result( + UNEXPECTED_DISCONNECT_CODE, "transport is closed" + ) + ) + res_readable._trigger_close() + req_writable = Writable(write_cb=lambda _: None, close_cb=None) + req_writable._closed = True + return { + "res_readable": res_readable, + "req_writable": req_writable, + } + + # Connect if needed + if self._connect_on_invoke: + transport.connect(to) + + # Get the session and a send function + session = transport._get_or_create_session(to) + session_id = session.id + try: + send_fn = transport.get_session_bound_send_fn(to, session_id) + except RuntimeError: + # Session already dead + res_readable = Readable() + res_readable._push_value( + err_result( + UNEXPECTED_DISCONNECT_CODE, + f"{to} unexpectedly disconnected", + ) + ) + res_readable._trigger_close() + req_writable = Writable(write_cb=lambda _: None, close_cb=None) + req_writable._closed = True + return { + "res_readable": res_readable, + "req_writable": req_writable, + } + + # Determine flags + proc_closes_with_init = proc_type in ("rpc", "subscription") + stream_id = generate_id() + + # Create readable for responses + res_readable: Readable = Readable() + + # Tracking state + clean_close = True + cleaned_up = False + + def cleanup(): + nonlocal cleaned_up + if cleaned_up: + return + cleaned_up = True + transport.remove_event_listener("message", on_message) + transport.remove_event_listener("sessionStatus", on_session_status) + + def close_readable(): + if not res_readable.is_closed(): + try: + res_readable._trigger_close() + except RuntimeError: + pass + if req_writable.is_closed(): + cleanup() + + # Create writable for requests + def write_cb(raw_value: Any) -> None: + try: + send_fn( + PartialTransportMessage( + payload=raw_value, + stream_id=stream_id, + control_flags=0, + ) + ) + 
except RuntimeError: + pass + + def close_cb() -> None: + nonlocal clean_close + if not proc_closes_with_init and clean_close: + try: + send_fn(close_stream_message(stream_id)) + except RuntimeError: + pass + if res_readable.is_closed(): + cleanup() + + req_writable: Writable = Writable(write_cb=write_cb, close_cb=close_cb) + + def on_message(msg: TransportMessage) -> None: + nonlocal clean_close + if msg.stream_id != stream_id: + return + if msg.to != transport.client_id: + return + + # Cancel from server + if is_stream_cancel(msg.control_flags): + clean_close = False + payload = msg.payload + if isinstance(payload, dict) and "ok" in payload: + res_readable._push_value(payload) + else: + res_readable._push_value( + err_result( + payload.get("code", "UNKNOWN") if isinstance(payload, dict) else "UNKNOWN", + str(payload), + ) + ) + close_readable() + if req_writable.is_writable(): + req_writable._closed = True + return + + if res_readable.is_closed(): + return + + # Normal payload (not a CLOSE control) + if isinstance(msg.payload, dict): + if msg.payload.get("type") != "CLOSE": + if "ok" in msg.payload: + res_readable._push_value(msg.payload) + + # Stream close + if is_stream_close(msg.control_flags): + close_readable() + + def on_session_status(evt: dict) -> None: + nonlocal clean_close + if evt.get("status") != "closing": + return + event_session = evt.get("session") + if event_session is None: + return + if event_session.to_id != to or event_session.id != session_id: + return + + clean_close = False + try: + res_readable._push_value( + err_result( + UNEXPECTED_DISCONNECT_CODE, + f"{to} unexpectedly disconnected", + ) + ) + except RuntimeError: + pass + close_readable() + if req_writable.is_writable(): + req_writable._closed = True + + def on_client_cancel() -> None: + nonlocal clean_close + clean_close = False + try: + res_readable._push_value( + err_result(CANCEL_CODE, "cancelled by client") + ) + except RuntimeError: + pass + close_readable() + if 
req_writable.is_writable(): + req_writable._closed = True + try: + send_fn( + cancel_message( + stream_id, + err_result(CANCEL_CODE, "cancelled by client"), + ) + ) + except RuntimeError: + pass + + # Register listeners + transport.add_event_listener("message", on_message) + transport.add_event_listener("sessionStatus", on_session_status) + + # Wire up abort signal + if abort_signal is not None: + # Use asyncio task to watch the event + async def _watch_abort(): + await abort_signal.wait() + on_client_cancel() + + try: + loop = asyncio.get_event_loop() + loop.create_task(_watch_abort()) + except RuntimeError: + pass + + # Send init message + init_flags = ( + ControlFlags.StreamOpenBit | ControlFlags.StreamClosedBit + if proc_closes_with_init + else ControlFlags.StreamOpenBit + ) + + try: + send_fn( + PartialTransportMessage( + payload=init, + stream_id=stream_id, + control_flags=init_flags, + service_name=service_name, + procedure_name=procedure_name, + ) + ) + except RuntimeError as e: + # Session dead at send time + try: + res_readable._push_value( + err_result( + UNEXPECTED_DISCONNECT_CODE, + f"{to} unexpectedly disconnected", + ) + ) + res_readable._trigger_close() + except RuntimeError: + pass + req_writable._closed = True + cleanup() + return { + "res_readable": res_readable, + "req_writable": req_writable, + } + + # For rpc/subscription, close request side immediately + if proc_closes_with_init: + req_writable._closed = True + + return { + "res_readable": res_readable, + "req_writable": req_writable, + } diff --git a/python-client/river/codec.py b/python-client/river/codec.py new file mode 100644 index 00000000..3d5e8f11 --- /dev/null +++ b/python-client/river/codec.py @@ -0,0 +1,110 @@ +"""Codec layer for encoding/decoding transport messages.""" + +from __future__ import annotations + +import json +import base64 +from abc import ABC, abstractmethod +from typing import Any + +from river.types import TransportMessage + + +class Codec(ABC): + """Abstract codec 
for encoding/decoding objects to/from bytes.""" + + @abstractmethod + def to_buffer(self, obj: dict[str, Any]) -> bytes: + """Encode an object to bytes.""" + ... + + @abstractmethod + def from_buffer(self, buf: bytes) -> dict[str, Any]: + """Decode bytes to an object.""" + ... + + +class _CustomEncoder(json.JSONEncoder): + """JSON encoder with support for bytes and large ints.""" + + def default(self, o: Any) -> Any: + if isinstance(o, (bytes, bytearray)): + return {"$t": base64.b64encode(o).decode("ascii")} + return super().default(o) + + +def _custom_object_hook(obj: dict) -> Any: + """JSON decoder hook for custom types.""" + if "$t" in obj and len(obj) == 1: + return base64.b64decode(obj["$t"]) + if "$b" in obj and len(obj) == 1: + return int(obj["$b"]) + return obj + + +class NaiveJsonCodec(Codec): + """Codec using JSON serialization (matches TypeScript NaiveJsonCodec).""" + + name = "naive" + + def to_buffer(self, obj: dict[str, Any]) -> bytes: + return json.dumps(obj, cls=_CustomEncoder, separators=(",", ":")).encode( + "utf-8" + ) + + def from_buffer(self, buf: bytes) -> dict[str, Any]: + return json.loads(buf.decode("utf-8"), object_hook=_custom_object_hook) + + +class BinaryCodec(Codec): + """Codec using msgpack serialization (matches TypeScript BinaryCodec).""" + + name = "binary" + + def to_buffer(self, obj: dict[str, Any]) -> bytes: + import msgpack # type: ignore[import-untyped] + + return msgpack.packb(obj, use_bin_type=True) + + def from_buffer(self, buf: bytes) -> dict[str, Any]: + import msgpack # type: ignore[import-untyped] + + return msgpack.unpackb(buf, raw=False) + + +class CodecMessageAdapter: + """Wraps a Codec with error handling and validation for TransportMessage.""" + + def __init__(self, codec: Codec) -> None: + self._codec = codec + + def to_buffer(self, msg: TransportMessage) -> tuple[bool, bytes | str]: + """Serialize a TransportMessage to bytes. + + Returns (True, bytes) on success, (False, error_reason) on failure. 
+ """ + try: + raw = msg.to_dict() + buf = self._codec.to_buffer(raw) + return True, buf + except Exception as e: + return False, f"Failed to serialize message: {e}" + + def from_buffer(self, buf: bytes) -> tuple[bool, TransportMessage | str]: + """Deserialize bytes to a TransportMessage. + + Returns (True, TransportMessage) on success, (False, error_reason) on failure. + """ + try: + raw = self._codec.from_buffer(buf) + if not isinstance(raw, dict): + return False, f"Expected dict, got {type(raw).__name__}" + # Validate required fields + required = ("id", "from", "to", "seq", "ack", "payload", "streamId") + for field in required: + if field not in raw: + return False, f"Missing required field: {field}" + msg = TransportMessage.from_dict(raw) + return True, msg + except Exception as e: + return False, f"Failed to deserialize message: {e}" diff --git a/python-client/river/session.py b/python-client/river/session.py new file mode 100644 index 00000000..f97daffb --- /dev/null +++ b/python-client/river/session.py @@ -0,0 +1,351 @@ +"""Session state machine for River protocol. + +Manages seq/ack bookkeeping, send buffers, and session lifecycle. 
+""" + +from __future__ import annotations + +import asyncio +import logging +import time +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Callable + +from river.codec import CodecMessageAdapter +from river.types import ( + ControlFlags, + PartialTransportMessage, + TransportMessage, + generate_id, + handshake_request_payload, + heartbeat_message, + is_ack, + PROTOCOL_VERSION, +) + +logger = logging.getLogger(__name__) + + +class SessionState(str, Enum): + """Session state machine states.""" + + NO_CONNECTION = "NoConnection" + BACKING_OFF = "BackingOff" + CONNECTING = "Connecting" + HANDSHAKING = "Handshaking" + CONNECTED = "Connected" + + +@dataclass +class SessionOptions: + """Configuration options for a session.""" + + heartbeat_interval_ms: float = 1000 + heartbeats_until_dead: int = 2 + session_disconnect_grace_ms: float = 5000 + connection_timeout_ms: float = 2000 + handshake_timeout_ms: float = 1000 + enable_transparent_reconnects: bool = True + + +DEFAULT_SESSION_OPTIONS = SessionOptions() + + +class Session: + """Represents a River session with seq/ack bookkeeping and send buffer. + + A session persists across potentially multiple connections, tracking + all the state needed for transparent reconnection. 
+ """ + + def __init__( + self, + session_id: str, + from_id: str, + to_id: str, + codec: CodecMessageAdapter, + options: SessionOptions | None = None, + ) -> None: + self.id = session_id + self.from_id = from_id + self.to_id = to_id + self.codec = codec + self.options = options or DEFAULT_SESSION_OPTIONS + + # Seq/ack bookkeeping + self.seq: int = 0 # Next seq to assign when sending + self.ack: int = 0 # Next expected seq from the other side + self.send_buffer: list[TransportMessage] = [] + + # State machine + self.state: SessionState = SessionState.NO_CONNECTION + + # Connection + self._ws: Any = None # The WebSocket connection + self._is_actively_heartbeating: bool = False + + # Timers + self._heartbeat_task: asyncio.Task | None = None + self._heartbeat_miss_task: asyncio.Task | None = None + self._grace_period_task: asyncio.Task | None = None + self._grace_expiry_time: float | None = None + + # Callbacks + self._on_message: Callable[[TransportMessage], None] | None = None + self._on_connection_closed: Callable[[], None] | None = None + self._on_session_grace_elapsed: Callable[[], None] | None = None + + self._destroyed = False + + @property + def next_seq(self) -> int: + """The next seq the other side should see from us. + + Returns the seq of the first unacked message in the buffer, + or our current seq if the buffer is empty. + """ + if self.send_buffer: + return self.send_buffer[0].seq + return self.seq + + def construct_msg( + self, partial: PartialTransportMessage + ) -> TransportMessage: + """Construct a full TransportMessage from a partial one. + + Fills in id, from, to, seq, ack and increments seq. 
+ """ + msg = TransportMessage( + id=generate_id(), + from_=self.from_id, + to=self.to_id, + seq=self.seq, + ack=self.ack, + payload=partial.payload, + stream_id=partial.stream_id, + control_flags=partial.control_flags, + service_name=partial.service_name, + procedure_name=partial.procedure_name, + tracing=partial.tracing, + ) + self.seq += 1 + return msg + + def send(self, partial: PartialTransportMessage) -> tuple[bool, str]: + """Construct and send a message. + + When connected, sends immediately over the wire and buffers. + When disconnected, only buffers. + + Returns (True, msg_id) on success, (False, reason) on failure. + """ + msg = self.construct_msg(partial) + self.send_buffer.append(msg) + + if self.state == SessionState.CONNECTED and self._ws is not None: + ok, result = self._send_over_wire(msg) + if not ok: + return False, result + return True, msg.id + + def _send_over_wire(self, msg: TransportMessage) -> tuple[bool, str]: + """Serialize and send a message over the current connection.""" + ok, result = self.codec.to_buffer(msg) + if not ok: + return False, result # type: ignore[return-value] + try: + assert self._ws is not None + # websockets library uses async send, but we schedule it + asyncio.get_event_loop().call_soon( + lambda data=result: self._do_ws_send(data) + ) + return True, msg.id + except Exception as e: + return False, f"Failed to send: {e}" + + def _do_ws_send(self, data: bytes) -> None: + """Actually send data over the WebSocket.""" + if self._ws is not None and not self._destroyed: + try: + asyncio.ensure_future(self._ws.send(data)) + except Exception as e: + logger.error("WebSocket send error: %s", e) + + def send_buffered_messages(self) -> tuple[bool, str | None]: + """Retransmit all buffered messages over the current connection. + + Called after a successful reconnection handshake. 
+ """ + for msg in self.send_buffer: + ok, reason = self._send_over_wire(msg) + if not ok: + return False, reason + return True, None + + def update_bookkeeping(self, their_ack: int, their_seq: int) -> None: + """Update seq/ack bookkeeping based on an incoming message. + + - Removes acknowledged messages from the send buffer. + - Updates our ack to their_seq + 1. + - Resets the heartbeat miss timeout. + """ + # Remove acked messages from send buffer + self.send_buffer = [m for m in self.send_buffer if m.seq >= their_ack] + # Update our ack + self.ack = their_seq + 1 + # Reset heartbeat miss timer + self._reset_heartbeat_miss_timeout() + + def send_heartbeat(self) -> None: + """Send a heartbeat message.""" + self.send(heartbeat_message()) + + def start_active_heartbeat(self, loop: asyncio.AbstractEventLoop) -> None: + """Start sending heartbeats at the configured interval (server behavior).""" + self._is_actively_heartbeating = True + interval = self.options.heartbeat_interval_ms / 1000.0 + + async def _heartbeat_loop(): + try: + while not self._destroyed and self.state == SessionState.CONNECTED: + await asyncio.sleep(interval) + if not self._destroyed and self.state == SessionState.CONNECTED: + self.send_heartbeat() + except asyncio.CancelledError: + pass + + self._heartbeat_task = loop.create_task(_heartbeat_loop()) + + def start_heartbeat_miss_timeout(self, loop: asyncio.AbstractEventLoop) -> None: + """Start the missing heartbeat timeout.""" + miss_duration = ( + self.options.heartbeats_until_dead + * self.options.heartbeat_interval_ms + / 1000.0 + ) + + async def _miss_timeout(): + try: + await asyncio.sleep(miss_duration) + if not self._destroyed and self._on_connection_closed: + logger.debug( + "Session %s: heartbeat miss timeout, closing connection", + self.id, + ) + self._on_connection_closed() + except asyncio.CancelledError: + pass + + if self._heartbeat_miss_task: + self._heartbeat_miss_task.cancel() + self._heartbeat_miss_task = 
loop.create_task(_miss_timeout()) + + def _reset_heartbeat_miss_timeout(self) -> None: + """Reset the heartbeat miss timer.""" + if self._heartbeat_miss_task: + self._heartbeat_miss_task.cancel() + self._heartbeat_miss_task = None + try: + loop = asyncio.get_event_loop() + if loop.is_running(): + self.start_heartbeat_miss_timeout(loop) + except RuntimeError: + pass + + def start_grace_period(self, loop: asyncio.AbstractEventLoop) -> None: + """Start the session disconnect grace period. + + If the session is not reconnected within this time, it's destroyed. + """ + grace_ms = self.options.session_disconnect_grace_ms + self._grace_expiry_time = time.monotonic() + grace_ms / 1000.0 + + async def _grace_timeout(): + try: + await asyncio.sleep(grace_ms / 1000.0) + if not self._destroyed and self._on_session_grace_elapsed: + logger.debug( + "Session %s: grace period elapsed, destroying", self.id + ) + self._on_session_grace_elapsed() + except asyncio.CancelledError: + pass + + if self._grace_period_task: + self._grace_period_task.cancel() + self._grace_period_task = loop.create_task(_grace_timeout()) + + def cancel_grace_period(self) -> None: + """Cancel the session disconnect grace period.""" + if self._grace_period_task: + self._grace_period_task.cancel() + self._grace_period_task = None + self._grace_expiry_time = None + + def cancel_heartbeats(self) -> None: + """Cancel all heartbeat-related tasks.""" + if self._heartbeat_task: + self._heartbeat_task.cancel() + self._heartbeat_task = None + if self._heartbeat_miss_task: + self._heartbeat_miss_task.cancel() + self._heartbeat_miss_task = None + self._is_actively_heartbeating = False + + def set_connected(self, ws: Any, loop: asyncio.AbstractEventLoop) -> None: + """Transition to connected state.""" + self.state = SessionState.CONNECTED + self._ws = ws + self.cancel_grace_period() + self.start_heartbeat_miss_timeout(loop) + + def set_disconnected(self, loop: asyncio.AbstractEventLoop) -> None: + """Transition to 
disconnected state (no connection).""" + self.state = SessionState.NO_CONNECTION + self.cancel_heartbeats() + old_ws = self._ws + self._ws = None + if old_ws is not None: + try: + asyncio.ensure_future(old_ws.close()) + except Exception: + pass + self.start_grace_period(loop) + + def destroy(self) -> None: + """Destroy the session, cleaning up all resources.""" + self._destroyed = True + self.cancel_heartbeats() + self.cancel_grace_period() + if self._ws is not None: + try: + asyncio.ensure_future(self._ws.close()) + except Exception: + pass + self._ws = None + self.send_buffer.clear() + + def create_handshake_request( + self, metadata: Any = None + ) -> TransportMessage: + """Create a handshake request transport message. + + Handshake messages have seq=0, ack=0, controlFlags=0. + """ + payload = handshake_request_payload( + session_id=self.id, + next_expected_seq=self.ack, + next_sent_seq=self.next_seq, + metadata=metadata, + ) + return TransportMessage( + id=generate_id(), + from_=self.from_id, + to=self.to_id, + seq=0, + ack=0, + payload=payload, + stream_id="handshake", + control_flags=0, + ) diff --git a/python-client/river/streams.py b/python-client/river/streams.py new file mode 100644 index 00000000..03bfd538 --- /dev/null +++ b/python-client/river/streams.py @@ -0,0 +1,153 @@ +"""Readable and Writable stream abstractions for River procedures.""" + +from __future__ import annotations + +import asyncio +from typing import Any, Callable, Generic, TypeVar + +T = TypeVar("T") + + +class ReadableBrokenError(Exception): + """Raised when a readable stream is broken.""" + + pass + + +class Readable(Generic[T]): + """Async readable stream for consuming procedure results. + + Supports async iteration via `async for` and explicit read via `next()`. 
+ """ + + def __init__(self) -> None: + self._queue: list[T] = [] + self._closed = False + self._broken = False + self._locked = False + self._waiters: list[asyncio.Future[None]] = [] + + def _push_value(self, value: T) -> None: + """Push a value into the readable stream (internal use).""" + if self._closed: + raise RuntimeError("Cannot push to a closed readable") + self._queue.append(value) + self._notify_waiters() + + def _trigger_close(self) -> None: + """Close the readable stream (internal use).""" + if self._closed: + raise RuntimeError("Readable already closed") + self._closed = True + self._notify_waiters() + + def _notify_waiters(self) -> None: + while self._waiters: + w = self._waiters.pop(0) + if not w.done(): + w.set_result(None) + + def is_readable(self) -> bool: + """Whether the stream can still be iterated (not locked or broken).""" + return not self._locked and not self._broken + + def is_closed(self) -> bool: + """Whether the stream has been closed.""" + return self._closed and len(self._queue) == 0 + + def break_(self) -> None: + """Break the stream, discarding all queued values.""" + if self._locked and self._broken: + return + self._broken = True + self._locked = True + self._queue.clear() + self._notify_waiters() + + async def collect(self) -> list[T]: + """Consume all values from the stream until it closes. + + Locks the stream. Returns the list of all values. + """ + self._locked = True + results: list[T] = [] + async for item in self._iterate(): + results.append(item) + return results + + async def next(self) -> tuple[bool, T | None]: + """Read the next value from the stream. + + Returns (False, value) if a value is available. + Returns (True, None) if the stream is done. 
+ """ + async for item in self._iterate(): + return False, item + return True, None + + async def _iterate(self): + """Internal async generator.""" + self._locked = True + while True: + if self._broken: + yield {"ok": False, "payload": {"code": "READABLE_BROKEN", "message": "stream was broken"}} + return + + if self._queue: + yield self._queue.pop(0) + continue + + if self._closed: + return + + # Wait for more data + loop = asyncio.get_event_loop() + fut: asyncio.Future[None] = loop.create_future() + self._waiters.append(fut) + await fut + + def __aiter__(self): + self._locked = True + return self._async_iter_impl() + + async def _async_iter_impl(self): + async for item in self._iterate(): + yield item + + +class Writable(Generic[T]): + """Writable stream for sending procedure requests. + + Wraps a write callback and a close callback. + """ + + def __init__( + self, + write_cb: Callable[[T], None], + close_cb: Callable[[], None] | None = None, + ) -> None: + self._write_cb = write_cb + self._close_cb = close_cb + self._closed = False + + def write(self, value: T) -> None: + """Write a value to the stream.""" + if self._closed: + raise RuntimeError("Cannot write to a closed writable") + self._write_cb(value) + + def close(self, value: T | None = None) -> None: + """Close the stream, optionally writing a final value.""" + if self._closed: + return # Idempotent + self._closed = True + if value is not None: + self._write_cb(value) + if self._close_cb: + self._close_cb() + + def is_writable(self) -> bool: + return not self._closed + + def is_closed(self) -> bool: + return self._closed diff --git a/python-client/river/transport.py b/python-client/river/transport.py new file mode 100644 index 00000000..5ca7ca72 --- /dev/null +++ b/python-client/river/transport.py @@ -0,0 +1,568 @@ +"""Client transport layer for the River protocol. + +Manages WebSocket connections, session lifecycle, handshake, +reconnection with backoff, and message dispatch. 
+""" + +from __future__ import annotations + +import asyncio +import logging +import math +import random +import time +from typing import Any, Callable + +from river.codec import Codec, CodecMessageAdapter, NaiveJsonCodec +from river.session import Session, SessionOptions, SessionState, DEFAULT_SESSION_OPTIONS +from river.types import ( + ControlFlags, + PartialTransportMessage, + TransportMessage, + generate_id, + is_ack, + is_stream_cancel, + is_stream_close, + is_stream_open, + RETRIABLE_HANDSHAKE_CODES, + FATAL_HANDSHAKE_CODES, + UNEXPECTED_DISCONNECT_CODE, + err_result, +) + +logger = logging.getLogger(__name__) + + +class EventDispatcher: + """Simple event dispatcher with typed event names.""" + + def __init__(self) -> None: + self._handlers: dict[str, set[Callable]] = {} + + def add_listener(self, event: str, handler: Callable) -> None: + if event not in self._handlers: + self._handlers[event] = set() + self._handlers[event].add(handler) + + def remove_listener(self, event: str, handler: Callable) -> None: + if event in self._handlers: + self._handlers[event].discard(handler) + + def dispatch(self, event: str, data: Any = None) -> None: + if event in self._handlers: + # Copy to avoid mutation during iteration + for handler in list(self._handlers[event]): + try: + handler(data) + except Exception as e: + logger.error("Event handler error for %s: %s", event, e) + + def listener_count(self, event: str) -> int: + return len(self._handlers.get(event, set())) + + +class LeakyBucketRateLimit: + """Rate limiter with exponential backoff for connection retries.""" + + def __init__( + self, + base_interval_ms: float = 150, + max_jitter_ms: float = 200, + max_backoff_ms: float = 32_000, + attempt_budget_capacity: int = 5, + budget_restore_interval_ms: float = 200, + ) -> None: + self.base_interval_ms = base_interval_ms + self.max_jitter_ms = max_jitter_ms + self.max_backoff_ms = max_backoff_ms + self.attempt_budget_capacity = attempt_budget_capacity + 
self.budget_restore_interval_ms = budget_restore_interval_ms + self.budget_consumed: int = 0 + self._restore_task: asyncio.Task | None = None + + def has_budget(self) -> bool: + return self.budget_consumed < self.attempt_budget_capacity + + def get_backoff_ms(self) -> float: + if self.budget_consumed == 0: + return 0 + exponent = max(0, self.budget_consumed - 1) + jitter = random.random() * self.max_jitter_ms + backoff = min( + self.base_interval_ms * (2**exponent), self.max_backoff_ms + ) + return backoff + jitter + + def consume_budget(self) -> None: + self._stop_restore() + self.budget_consumed += 1 + + def start_restoring_budget(self) -> None: + """Start gradually restoring budget after a successful connection.""" + self._stop_restore() + + async def _restore_loop(): + try: + while self.budget_consumed > 0: + await asyncio.sleep( + self.budget_restore_interval_ms / 1000.0 + ) + self.budget_consumed = max(0, self.budget_consumed - 1) + except asyncio.CancelledError: + pass + + try: + loop = asyncio.get_event_loop() + self._restore_task = loop.create_task(_restore_loop()) + except RuntimeError: + pass + + def _stop_restore(self) -> None: + if self._restore_task: + self._restore_task.cancel() + self._restore_task = None + + def reset(self) -> None: + self.budget_consumed = 0 + self._stop_restore() + + +class WebSocketClientTransport: + """Client-side transport managing WebSocket connections and sessions. + + Handles connection lifecycle, handshakes, reconnection with backoff, + heartbeat echo, and message dispatch. 
+ """ + + def __init__( + self, + ws_url: str | Callable[..., str], + client_id: str | None = None, + server_id: str | None = None, + codec: Codec | None = None, + options: SessionOptions | None = None, + handshake_metadata: Any = None, + connect_on_invoke: bool = True, + eagerly_connect: bool = False, + ) -> None: + self.client_id = client_id or generate_id() + self.server_id = server_id or "SERVER" + self._ws_url = ws_url + self._codec = codec or NaiveJsonCodec() + self._codec_adapter = CodecMessageAdapter(self._codec) + self.options = options or DEFAULT_SESSION_OPTIONS + self._handshake_metadata = handshake_metadata + self._connect_on_invoke = connect_on_invoke + + # State + self._status: str = "open" # 'open' | 'closed' + self.sessions: dict[str, Session] = {} # to_id -> Session + self._events = EventDispatcher() + self._retry_budget = LeakyBucketRateLimit() + self._reconnect_on_connection_drop = True + + # Connection tasks + self._connect_tasks: dict[str, asyncio.Task] = {} + + self._loop: asyncio.AbstractEventLoop | None = None + + def get_status(self) -> str: + return self._status + + def _get_loop(self) -> asyncio.AbstractEventLoop: + if self._loop is None: + self._loop = asyncio.get_event_loop() + return self._loop + + # --- Event API --- + + def add_event_listener(self, event: str, handler: Callable) -> None: + self._events.add_listener(event, handler) + + def remove_event_listener(self, event: str, handler: Callable) -> None: + self._events.remove_listener(event, handler) + + # --- Session Management --- + + def _get_or_create_session(self, to: str) -> Session: + """Get an existing session or create a new unconnected one.""" + if to in self.sessions: + return self.sessions[to] + session = Session( + session_id=generate_id(), + from_id=self.client_id, + to_id=to, + codec=self._codec_adapter, + options=self.options, + ) + session._on_session_grace_elapsed = lambda: self._on_session_grace_elapsed(to) + self.sessions[to] = session + self._events.dispatch( + 
"sessionStatus", {"status": "created", "session": session} + ) + return session + + def _delete_session(self, to: str, emit_closing: bool = True) -> None: + """Delete a session and clean up.""" + session = self.sessions.pop(to, None) + if session is None: + return + if emit_closing: + self._events.dispatch( + "sessionStatus", {"status": "closing", "session": session} + ) + session.destroy() + self._events.dispatch( + "sessionStatus", {"status": "closed", "session": session} + ) + + def _on_session_grace_elapsed(self, to: str) -> None: + """Called when a session's grace period expires.""" + logger.debug("Session grace period elapsed for %s", to) + self._delete_session(to) + + # --- Connection Flow --- + + def connect(self, to: str | None = None) -> None: + """Initiate a connection to the given server. + + Follows the state transition: + NoConnection -> BackingOff -> Connecting -> Handshaking -> Connected + """ + to = to or self.server_id + if self._status != "open": + return + + session = self._get_or_create_session(to) + if session.state != SessionState.NO_CONNECTION: + return # Already connecting/connected + + if not self._retry_budget.has_budget(): + self._events.dispatch( + "protocolError", + {"type": "conn_retry_exceeded", "message": "Retries exceeded"}, + ) + return + + backoff_ms = self._retry_budget.get_backoff_ms() + self._retry_budget.consume_budget() + + # Schedule the connection attempt after backoff + loop = self._get_loop() + session.state = SessionState.BACKING_OFF + + async def _do_connect(): + try: + if backoff_ms > 0: + await asyncio.sleep(backoff_ms / 1000.0) + + if self._status != "open" or session._destroyed: + return + + session.state = SessionState.CONNECTING + ws = await self._create_connection(to) + + if session._destroyed: + await ws.close() + return + + session.state = SessionState.HANDSHAKING + await self._do_handshake(session, ws, to) + except asyncio.CancelledError: + pass + except Exception as e: + logger.debug("Connection attempt 
failed for %s: %s", to, e) + if not session._destroyed: + self._on_connection_failed(to) + + task = loop.create_task(_do_connect()) + self._connect_tasks[to] = task + + async def _create_connection(self, to: str) -> Any: + """Create a new WebSocket connection.""" + import websockets # type: ignore[import-untyped] + + url = self._ws_url if isinstance(self._ws_url, str) else self._ws_url(to) + + ws = await asyncio.wait_for( + websockets.connect(url, max_size=None, ping_interval=None, ping_timeout=None), + timeout=self.options.connection_timeout_ms / 1000.0, + ) + return ws + + async def _do_handshake( + self, session: Session, ws: Any, to: str + ) -> None: + """Perform the handshake on a newly connected WebSocket.""" + # Send handshake request + hs_msg = session.create_handshake_request( + metadata=self._handshake_metadata + ) + ok, buf = self._codec_adapter.to_buffer(hs_msg) + if not ok: + logger.error("Failed to encode handshake: %s", buf) + await ws.close() + self._on_connection_failed(to) + return + + await ws.send(buf) + + # Wait for handshake response + try: + response_bytes = await asyncio.wait_for( + ws.recv(), timeout=self.options.handshake_timeout_ms / 1000.0 + ) + except (asyncio.TimeoutError, Exception) as e: + logger.debug("Handshake timeout/error for %s: %s", to, e) + await ws.close() + self._on_connection_failed(to) + return + + if isinstance(response_bytes, str): + response_bytes = response_bytes.encode("utf-8") + + ok, result = self._codec_adapter.from_buffer(response_bytes) + if not ok: + logger.error("Failed to decode handshake response: %s", result) + await ws.close() + self._on_connection_failed(to) + return + + response_msg: TransportMessage = result # type: ignore[assignment] + payload = response_msg.payload + + # Validate handshake response + if ( + not isinstance(payload, dict) + or payload.get("type") != "HANDSHAKE_RESP" + ): + logger.error("Invalid handshake response payload") + await ws.close() + self._on_connection_failed(to) + return + + 
status = payload.get("status", {}) + if not status.get("ok"): + code = status.get("code", "UNKNOWN") + reason = status.get("reason", "Unknown reason") + logger.debug( + "Handshake rejected for %s: %s (%s)", to, reason, code + ) + await ws.close() + + if code in RETRIABLE_HANDSHAKE_CODES: + # Session state mismatch - destroy session and retry + self._delete_session(to) + self._try_reconnecting(to) + else: + self._events.dispatch( + "protocolError", + { + "type": "handshake_failed", + "message": reason, + "code": code, + }, + ) + self._on_connection_failed(to) + return + + # Check session ID match + resp_session_id = status.get("sessionId") + if resp_session_id != session.id: + # Server assigned a different session - old session is stale + logger.debug( + "Session ID mismatch: expected %s, got %s", + session.id, + resp_session_id, + ) + # The server lost our session state; destroy old and create new + self._delete_session(to, emit_closing=True) + self._try_reconnecting(to) + return + + # Handshake successful + loop = self._get_loop() + session.set_connected(ws, loop) + self._events.dispatch( + "sessionTransition", + {"state": SessionState.CONNECTED, "id": session.id}, + ) + + # Retransmit buffered messages + ok, err = session.send_buffered_messages() + if not ok: + logger.error("Failed to send buffered messages: %s", err) + self._events.dispatch( + "protocolError", + {"type": "message_send_failure", "message": err}, + ) + self._delete_session(to) + return + + # Start restoring retry budget + self._retry_budget.start_restoring_budget() + + # Start listening for messages + self._start_message_listener(session, ws, to) + + def _start_message_listener( + self, session: Session, ws: Any, to: str + ) -> None: + """Start the async message listener on the WebSocket.""" + loop = self._get_loop() + + session._on_connection_closed = lambda: self._on_connection_dropped(to) + + async def _listen(): + try: + async for raw_msg in ws: + if session._destroyed: + break + if 
isinstance(raw_msg, str): + raw_msg = raw_msg.encode("utf-8") + self._on_message_data(session, raw_msg, to) + except Exception as e: + if not session._destroyed: + logger.debug( + "WebSocket error for session %s: %s", session.id, e + ) + finally: + if not session._destroyed: + self._on_connection_dropped(to) + + loop.create_task(_listen()) + + def _on_message_data( + self, session: Session, raw: bytes, to: str + ) -> None: + """Handle raw bytes received from the WebSocket.""" + ok, result = self._codec_adapter.from_buffer(raw) + if not ok: + self._events.dispatch( + "protocolError", + {"type": "invalid_message", "message": result}, + ) + return + + msg: TransportMessage = result # type: ignore[assignment] + + # Check message ordering + if msg.seq != session.ack: + if msg.seq < session.ack: + # Duplicate - discard silently + return + else: + # Future message - close connection to force re-handshake + logger.debug( + "Seq out of order: expected %d, got %d. Closing.", + session.ack, + msg.seq, + ) + if session._ws: + asyncio.ensure_future(session._ws.close()) + return + + # Update bookkeeping + session.update_bookkeeping(msg.ack, msg.seq) + + # Dispatch non-heartbeat messages + if not is_ack(msg.control_flags): + self._events.dispatch("message", msg) + return + + # If this is a heartbeat and we're not actively heartbeating (client), + # echo back + if not session._is_actively_heartbeating: + session.send_heartbeat() + + def _on_connection_dropped(self, to: str) -> None: + """Handle a dropped connection.""" + session = self.sessions.get(to) + if session is None or session._destroyed: + return + if session.state != SessionState.CONNECTED: + return + + loop = self._get_loop() + session.set_disconnected(loop) + self._events.dispatch( + "sessionTransition", + {"state": SessionState.NO_CONNECTION, "id": session.id}, + ) + + if self._reconnect_on_connection_drop: + self._try_reconnecting(to) + + def _on_connection_failed(self, to: str) -> None: + """Handle a failed 
connection attempt.""" + session = self.sessions.get(to) + if session is None or session._destroyed: + return + + loop = self._get_loop() + session.state = SessionState.NO_CONNECTION + + if self._reconnect_on_connection_drop: + self._try_reconnecting(to) + + def _try_reconnecting(self, to: str) -> None: + """Try to reconnect to the server.""" + if self._status != "open": + return + if not self._reconnect_on_connection_drop: + return + # Use call_soon to break out of the current call stack + loop = self._get_loop() + loop.call_soon(lambda: self.connect(to)) + + # --- Session-Bound Send --- + + def get_session_bound_send_fn( + self, to: str, session_id: str + ) -> Callable[[PartialTransportMessage], str]: + """Get a send function scoped to a specific session. + + The send function will raise if the session has been replaced or destroyed. + """ + + def _send(msg: PartialTransportMessage) -> str: + session = self.sessions.get(to) + if session is None: + raise RuntimeError("Session scope ended (closed)") + if session.id != session_id or session._destroyed: + raise RuntimeError("Session scope ended (transition)") + + ok, result = session.send(msg) + if not ok: + raise RuntimeError(f"Send failed: {result}") + return result + + return _send + + # --- Lifecycle --- + + async def close(self) -> None: + """Close the transport and all sessions.""" + if self._status == "closed": + return + self._status = "closed" + + # Cancel all pending connection tasks + for task in self._connect_tasks.values(): + task.cancel() + self._connect_tasks.clear() + + # Delete all sessions + for to in list(self.sessions.keys()): + self._delete_session(to) + + self._retry_budget.reset() + self._events.dispatch("transportStatus", {"status": "closed"}) + + @property + def reconnect_on_connection_drop(self) -> bool: + return self._reconnect_on_connection_drop + + @reconnect_on_connection_drop.setter + def reconnect_on_connection_drop(self, value: bool) -> None: + self._reconnect_on_connection_drop = 
value diff --git a/python-client/river/types.py b/python-client/river/types.py new file mode 100644 index 00000000..591ed830 --- /dev/null +++ b/python-client/river/types.py @@ -0,0 +1,259 @@ +"""Core types for the River protocol.""" + +from __future__ import annotations + +import string +import random +from dataclasses import dataclass, field +from enum import IntFlag +from typing import Any, TypeVar, Generic, Union + + +# --- ID Generation --- + +_ID_ALPHABET = string.ascii_letters + string.digits +_ID_LENGTH = 12 + + +def generate_id() -> str: + """Generate a nanoid-style random ID (12 chars, alphanumeric).""" + return "".join(random.choices(_ID_ALPHABET, k=_ID_LENGTH)) + + +# --- Control Flags --- + + +class ControlFlags(IntFlag): + """Bit flags for transport message control signals.""" + + AckBit = 0b00001 # 1 - heartbeat/ack only + StreamOpenBit = 0b00010 # 2 - first message of a stream + StreamCancelBit = 0b00100 # 4 - abrupt cancel with ProtocolError payload + StreamClosedBit = 0b01000 # 8 - last message of a stream + + +def is_ack(flags: int) -> bool: + return (flags & ControlFlags.AckBit) == ControlFlags.AckBit + + +def is_stream_open(flags: int) -> bool: + return (flags & ControlFlags.StreamOpenBit) == ControlFlags.StreamOpenBit + + +def is_stream_cancel(flags: int) -> bool: + return (flags & ControlFlags.StreamCancelBit) == ControlFlags.StreamCancelBit + + +def is_stream_close(flags: int) -> bool: + return (flags & ControlFlags.StreamClosedBit) == ControlFlags.StreamClosedBit + + +# --- Transport Message --- + + +@dataclass +class TransportMessage: + """The envelope for all messages sent over the wire.""" + + id: str + from_: str # 'from' is a Python keyword + to: str + seq: int + ack: int + payload: Any + stream_id: str + control_flags: int = 0 + service_name: str | None = None + procedure_name: str | None = None + tracing: dict[str, str] | None = None + + def to_dict(self) -> dict[str, Any]: + """Serialize to a dict matching the wire format.""" + d: 
dict[str, Any] = { + "id": self.id, + "from": self.from_, + "to": self.to, + "seq": self.seq, + "ack": self.ack, + "payload": self.payload, + "streamId": self.stream_id, + "controlFlags": self.control_flags, + } + if self.service_name is not None: + d["serviceName"] = self.service_name + if self.procedure_name is not None: + d["procedureName"] = self.procedure_name + if self.tracing is not None: + d["tracing"] = self.tracing + return d + + @classmethod + def from_dict(cls, d: dict[str, Any]) -> TransportMessage: + """Deserialize from a wire format dict.""" + return cls( + id=d["id"], + from_=d["from"], + to=d["to"], + seq=d["seq"], + ack=d["ack"], + payload=d["payload"], + stream_id=d["streamId"], + control_flags=d.get("controlFlags", 0), + service_name=d.get("serviceName"), + procedure_name=d.get("procedureName"), + tracing=d.get("tracing"), + ) + + +@dataclass +class PartialTransportMessage: + """A transport message missing id, from, to, seq, ack -- filled in by Session.""" + + payload: Any + stream_id: str + control_flags: int = 0 + service_name: str | None = None + procedure_name: str | None = None + tracing: dict[str, str] | None = None + + +# --- Result Types --- + +T = TypeVar("T") +E = TypeVar("E") + + +@dataclass +class OkResult(Generic[T]): + """Success result.""" + + payload: T + ok: bool = field(default=True, init=False) + + +@dataclass +class ErrResult(Generic[E]): + """Error result.""" + + payload: E + ok: bool = field(default=False, init=False) + + +Result = Union[OkResult[T], ErrResult[E]] + + +def Ok(payload: Any) -> OkResult: + """Create an Ok result.""" + return OkResult(payload=payload) + + +def Err(payload: Any) -> ErrResult: + """Create an Err result.""" + return ErrResult(payload=payload) + + +def ok_result(payload: Any) -> dict[str, Any]: + """Create an ok result dict for wire format.""" + return {"ok": True, "payload": payload} + + +def err_result(code: str, message: str, extras: Any = None) -> dict[str, Any]: + """Create an error result 
    dict for wire format."""
    p: dict[str, Any] = {"code": code, "message": message}
    if extras is not None:
        p["extras"] = extras
    return {"ok": False, "payload": p}


# --- Protocol Error Codes ---

UNEXPECTED_DISCONNECT_CODE = "UNEXPECTED_DISCONNECT"
CANCEL_CODE = "CANCEL"
UNCAUGHT_ERROR_CODE = "UNCAUGHT_ERROR"
INVALID_REQUEST_CODE = "INVALID_REQUEST"

# --- Protocol Version ---

# Sent in every HANDSHAKE_REQ; a mismatch is rejected by the server.
PROTOCOL_VERSION = "v2.0"


# --- Control Message Helpers ---


def handshake_request_payload(
    session_id: str,
    next_expected_seq: int,
    next_sent_seq: int,
    metadata: Any = None,
) -> dict[str, Any]:
    """Create a handshake request payload.

    expectedSessionState carries the client's seq/ack bookkeeping so the
    server can detect a session-state mismatch on reconnect.
    metadata is only included when provided.
    """
    payload: dict[str, Any] = {
        "type": "HANDSHAKE_REQ",
        "protocolVersion": PROTOCOL_VERSION,
        "sessionId": session_id,
        "expectedSessionState": {
            "nextExpectedSeq": next_expected_seq,
            "nextSentSeq": next_sent_seq,
        },
    }
    if metadata is not None:
        payload["metadata"] = metadata
    return payload


def handshake_response_ok(session_id: str) -> dict[str, Any]:
    """Create a successful handshake response payload (mainly for tests)."""
    return {
        "type": "HANDSHAKE_RESP",
        "status": {"ok": True, "sessionId": session_id},
    }


def ack_payload() -> dict[str, str]:
    """Heartbeat/ACK control payload."""
    return {"type": "ACK"}


def close_payload() -> dict[str, str]:
    """Stream close control payload."""
    return {"type": "CLOSE"}


def close_stream_message(stream_id: str) -> PartialTransportMessage:
    """Create a close stream partial message (graceful end-of-stream)."""
    return PartialTransportMessage(
        payload=close_payload(),
        stream_id=stream_id,
        control_flags=ControlFlags.StreamClosedBit,
    )


def cancel_message(stream_id: str, error_payload: dict) -> PartialTransportMessage:
    """Create a cancel stream partial message (abrupt cancel with error)."""
    return PartialTransportMessage(
        payload=error_payload,
        stream_id=stream_id,
        control_flags=ControlFlags.StreamCancelBit,
    )


def heartbeat_message() -> PartialTransportMessage:
    """Create a heartbeat partial message."""
    return PartialTransportMessage(
+ payload=ack_payload(), + stream_id="heartbeat", + control_flags=ControlFlags.AckBit, + ) + + +# --- Handshake Error Codes --- + +RETRIABLE_HANDSHAKE_CODES = frozenset({"SESSION_STATE_MISMATCH"}) +FATAL_HANDSHAKE_CODES = frozenset( + { + "MALFORMED_HANDSHAKE_META", + "MALFORMED_HANDSHAKE", + "PROTOCOL_VERSION_MISMATCH", + "REJECTED_BY_CUSTOM_HANDLER", + "REJECTED_UNSUPPORTED_CLIENT", + } +) diff --git a/python-client/tests/__init__.py b/python-client/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python-client/tests/conftest.py b/python-client/tests/conftest.py new file mode 100644 index 00000000..98944317 --- /dev/null +++ b/python-client/tests/conftest.py @@ -0,0 +1,85 @@ +"""Pytest fixtures for River Python client tests. + +Manages the lifecycle of a TypeScript test server process that the +Python client connects to. +""" + +from __future__ import annotations + +import asyncio +import os +import re +import signal +import subprocess +import sys +import time +from typing import AsyncGenerator, Generator + +import pytest +import pytest_asyncio + + +SERVER_SCRIPT = os.path.join(os.path.dirname(__file__), "test_server.mjs") +RIVER_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) + + +@pytest.fixture(scope="session") +def event_loop(): + """Create an event loop for the entire test session.""" + loop = asyncio.new_event_loop() + yield loop + loop.close() + + +@pytest.fixture(scope="session") +def river_server_port() -> Generator[int, None, None]: + """Start the TypeScript test server and return the port it listens on. + + The server is started once for the entire test session and killed afterward. 
+ """ + proc = subprocess.Popen( + ["node", SERVER_SCRIPT], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=RIVER_ROOT, + ) + + # Wait for the server to print the port + port = None + deadline = time.monotonic() + 30 # 30s timeout + assert proc.stdout is not None + while time.monotonic() < deadline: + line = proc.stdout.readline().decode("utf-8").strip() + if not line: + # Check if process died + if proc.poll() is not None: + stderr = proc.stderr.read().decode("utf-8") if proc.stderr else "" + raise RuntimeError( + f"Test server exited with code {proc.returncode}.\n" + f"stderr: {stderr}" + ) + time.sleep(0.1) + continue + m = re.match(r"RIVER_PORT=(\d+)", line) + if m: + port = int(m.group(1)) + break + + if port is None: + proc.kill() + raise RuntimeError("Failed to get port from test server within 30s") + + yield port + + # Cleanup: terminate the server + proc.send_signal(signal.SIGTERM) + try: + proc.wait(timeout=5) + except subprocess.TimeoutExpired: + proc.kill() + + +@pytest.fixture +def server_url(river_server_port: int) -> str: + """Return the WebSocket URL for the test server.""" + return f"ws://127.0.0.1:{river_server_port}" diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py new file mode 100644 index 00000000..fe523150 --- /dev/null +++ b/python-client/tests/test_e2e.py @@ -0,0 +1,695 @@ +"""End-to-end tests for the River Python client. + +Tests the Python client against the TypeScript test server, covering +all four procedure types and core protocol behavior. 
+""" + +from __future__ import annotations + +import asyncio +import pytest + +from river.client import RiverClient +from river.transport import WebSocketClientTransport +from river.codec import NaiveJsonCodec + + +# -- helpers -- + + +async def make_client(server_url: str, **kwargs) -> RiverClient: + """Create a connected RiverClient.""" + transport = WebSocketClientTransport( + ws_url=server_url, + client_id=None, # auto-generate + server_id="SERVER", + codec=NaiveJsonCodec(), + connect_on_invoke=kwargs.get("connect_on_invoke", True), + eagerly_connect=kwargs.get("eagerly_connect", False), + ) + return RiverClient(transport, server_id="SERVER") + + +async def cleanup_client(client: RiverClient) -> None: + await client.transport.close() + + +# ===================================================================== +# RPC Tests +# ===================================================================== + + +class TestRpc: + @pytest.mark.asyncio + async def test_rpc_basic(self, server_url: str): + """Basic RPC call returns correct result.""" + client = await make_client(server_url) + try: + result = await client.rpc("test", "add", {"n": 3}) + assert result["ok"] is True + assert result["payload"]["result"] == 3 + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def test_fallible_rpc_success(self, server_url: str): + """Fallible RPC returns Ok on valid input.""" + client = await make_client(server_url) + try: + result = await client.rpc("fallible", "divide", {"a": 10, "b": 2}) + assert result["ok"] is True + assert result["payload"]["result"] == 5.0 + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def test_fallible_rpc_error(self, server_url: str): + """Fallible RPC returns Err with correct error code.""" + client = await make_client(server_url) + try: + result = await client.rpc("fallible", "divide", {"a": 10, "b": 0}) + assert result["ok"] is False + assert result["payload"]["code"] == "DIV_BY_ZERO" + finally: + await 
cleanup_client(client) + + @pytest.mark.asyncio + async def test_concurrent_rpcs(self, server_url: str): + """Multiple concurrent RPCs all complete correctly.""" + client = await make_client(server_url) + try: + tasks = [ + client.rpc("ordering", "add", {"n": i}) for i in range(10) + ] + results = await asyncio.gather(*tasks) + for i, result in enumerate(results): + assert result["ok"] is True + assert result["payload"]["n"] == i + finally: + await cleanup_client(client) + + +# ===================================================================== +# Stream Tests +# ===================================================================== + + +class TestStream: + @pytest.mark.asyncio + async def test_stream_basic(self, server_url: str): + """Stream echoes messages correctly, skipping ignored ones.""" + client = await make_client(server_url) + try: + stream = client.stream("test", "echo", {}) + + # Write messages + stream.req_writable.write({"msg": "hello", "ignore": False}) + stream.req_writable.write({"msg": "world", "ignore": False}) + stream.req_writable.write({"msg": "skip", "ignore": True}) + stream.req_writable.write({"msg": "end", "ignore": False}) + stream.req_writable.close() + + # Read responses + results = [] + async for msg in stream.res_readable: + results.append(msg) + + assert len(results) == 3 + assert results[0]["ok"] is True + assert results[0]["payload"]["response"] == "hello" + assert results[1]["payload"]["response"] == "world" + assert results[2]["payload"]["response"] == "end" + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def test_stream_empty(self, server_url: str): + """Stream with immediate close returns no results.""" + client = await make_client(server_url) + try: + stream = client.stream("test", "echo", {}) + stream.req_writable.close() + + results = await stream.res_readable.collect() + assert len(results) == 0 + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def 
test_stream_with_init_message(self, server_url: str):
        """Stream handler receives the init message."""
        client = await make_client(server_url)
        try:
            stream = client.stream(
                "test", "echoWithPrefix", {"prefix": "test"}
            )
            stream.req_writable.write({"msg": "hello", "ignore": False})
            stream.req_writable.write({"msg": "world", "ignore": False})
            stream.req_writable.close()

            # echoWithPrefix prepends the init payload's prefix to each echo.
            results = await stream.res_readable.collect()
            assert len(results) == 2
            assert results[0]["payload"]["response"] == "test hello"
            assert results[1]["payload"]["response"] == "test world"
        finally:
            await cleanup_client(client)

    @pytest.mark.asyncio
    async def test_fallible_stream(self, server_url: str):
        """Stream correctly propagates both Ok and Err results."""
        client = await make_client(server_url)
        try:
            stream = client.stream("fallible", "echo", {})

            # Normal message: echoed back as an Ok result.
            stream.req_writable.write(
                {"msg": "hello", "throwResult": False, "throwError": False}
            )
            done, msg = await stream.res_readable.next()
            assert not done
            assert msg["ok"] is True
            assert msg["payload"]["response"] == "hello"

            # Error result (service-level error): handler writes an Err
            # with its declared STREAM_ERROR code; the stream stays open.
            stream.req_writable.write(
                {"msg": "fail", "throwResult": True, "throwError": False}
            )
            done, msg = await stream.res_readable.next()
            assert not done
            assert msg["ok"] is False
            assert msg["payload"]["code"] == "STREAM_ERROR"

            # Uncaught error (causes stream cancel): handler throws, which
            # surfaces to the client as an UNCAUGHT_ERROR result.
            stream.req_writable.write(
                {"msg": "throw", "throwResult": False, "throwError": True}
            )
            done, msg = await stream.res_readable.next()
            assert not done
            assert msg["ok"] is False
            assert msg["payload"]["code"] == "UNCAUGHT_ERROR"
        finally:
            await cleanup_client(client)

    @pytest.mark.asyncio
    async def test_concurrent_streams(self, server_url: str):
        """Multiple concurrent streams work independently."""
        client = await make_client(server_url)
        try:
            streams = []
            for _ in range(5):
                s = client.stream("test", "echo", {})
                streams.append(s)

            # Write to each stream before reading any, so all five are
            # open on the wire concurrently.
            for i, s in enumerate(streams):
                s.req_writable.write({"msg": f"msg-{i}", "ignore": False})
                s.req_writable.close()

            # Read from each stream; responses must not cross streams.
            for i, s in enumerate(streams):
                results = await s.res_readable.collect()
                assert len(results) == 1
                assert results[0]["payload"]["response"] == f"msg-{i}"
        finally:
            await cleanup_client(client)


# =====================================================================
# Subscription Tests
# =====================================================================


class TestSubscription:
    """E2E tests for the subscription procedure type."""

    @pytest.mark.asyncio
    async def test_subscription_basic(self, server_url: str):
        """Subscription receives initial value and updates."""
        client = await make_client(server_url)
        try:
            sub = client.subscribe("subscribable", "value", {})

            # Read initial value (the server pushes the current count
            # immediately on subscribe).
            done, msg = await sub.res_readable.next()
            assert not done
            assert msg["ok"] is True
            initial_count = msg["payload"]["count"]

            # Trigger an update via a separate RPC on the same service.
            add_result = await client.rpc("subscribable", "add", {"n": 1})
            assert add_result["ok"] is True

            # Read updated value pushed by the subscription.
            done, msg = await sub.res_readable.next()
            assert not done
            assert msg["ok"] is True
            assert msg["payload"]["count"] == initial_count + 1
        finally:
            await cleanup_client(client)


# =====================================================================
# Upload Tests
# =====================================================================


class TestUpload:
    """E2E tests for the upload procedure type (client streams, server
    replies once via finalize())."""

    @pytest.mark.asyncio
    async def test_upload_basic(self, server_url: str):
        """Upload sums multiple values correctly."""
        client = await make_client(server_url)
        try:
            upload = client.upload("uploadable", "addMultiple", {})
            upload.req_writable.write({"n": 1})
            upload.req_writable.write({"n": 2})
            upload.req_writable.close()

            result = await upload.finalize()
            assert result["ok"] is True
            assert result["payload"]["result"] == 3
        finally:
            await cleanup_client(client)

    @pytest.mark.asyncio
    async def test_upload_empty(self, server_url: str):
        """Upload with no data returns zero."""
        client = await make_client(server_url)
        try:
            upload = client.upload("uploadable", "addMultiple", {})
            # Close immediately: the handler's sum of zero items is 0.
            upload.req_writable.close()

            result = await upload.finalize()
            assert result["ok"] is True
            assert result["payload"]["result"] == 0
        finally:
            await cleanup_client(client)

    @pytest.mark.asyncio
    async def test_upload_with_init_message(self, server_url: str):
        """Upload handler receives the init message."""
        client = await make_client(server_url)
        try:
            upload = client.upload(
                "uploadable", "addMultipleWithPrefix", {"prefix": "test"}
            )
            upload.req_writable.write({"n": 1})
            upload.req_writable.write({"n": 2})
            upload.req_writable.close()

            result = await upload.finalize()
            assert result["ok"] is True
            assert result["payload"]["result"] == "test 3"
        finally:
            await cleanup_client(client)

    @pytest.mark.asyncio
    async def test_upload_server_cancel(self, server_url: str):
        """Upload receives server-initiated cancel when limit exceeded."""
        client = await make_client(server_url)
        try:
            upload = client.upload("uploadable", "cancellableAdd", {})
            # 9 + 1 reaches the server handler's running-total limit of 10,
            # which makes it cancel and return the CANCEL error.
            upload.req_writable.write({"n": 9})
            upload.req_writable.write({"n": 1})
            # Don't close - server should cancel

            result = await upload.finalize()
            assert result["ok"] is False
            assert result["payload"]["code"] == "CANCEL"
        finally:
            await cleanup_client(client)


# =====================================================================
# Disconnect Tests
# =====================================================================


class TestDisconnect:
    """Invoking each procedure type after the transport is closed must
    fail fast with the UNEXPECTED_DISCONNECT protocol error."""

    @pytest.mark.asyncio
    async def test_rpc_on_closed_transport(self, server_url: str):
        """RPC on a closed transport returns UNEXPECTED_DISCONNECT."""
        client = await make_client(server_url)
        await client.transport.close()

        result = await client.rpc("test", "add", {"n": 1})
        assert result["ok"] is False
        assert result["payload"]["code"] == "UNEXPECTED_DISCONNECT"

    @pytest.mark.asyncio
    async def test_stream_on_closed_transport(self, server_url: str):
        """Stream on a closed transport returns UNEXPECTED_DISCONNECT."""
        client = await make_client(server_url)
        await client.transport.close()

        stream = client.stream("test", "echo", {})
        done, msg = await stream.res_readable.next()
        assert not done
        assert msg["ok"] is False
        assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT"

    @pytest.mark.asyncio
    async def test_upload_on_closed_transport(self, server_url: str):
        """Upload on a closed transport returns UNEXPECTED_DISCONNECT."""
        client = await make_client(server_url)
        await client.transport.close()

        upload = client.upload("uploadable", "addMultiple", {})
        # The request writable is unusable from the start when the
        # transport is already down.
        assert not upload.req_writable.is_writable()
        result = await upload.finalize()
        assert result["ok"] is False
        assert result["payload"]["code"] == "UNEXPECTED_DISCONNECT"

    @pytest.mark.asyncio
    async def test_subscription_on_closed_transport(self, server_url: str):
        """Subscription on a closed transport returns UNEXPECTED_DISCONNECT."""
        client = await make_client(server_url)
        await client.transport.close()

        sub = client.subscribe("subscribable", "value", {})
        done, msg = await sub.res_readable.next()
        assert not done
        assert msg["ok"] is False
        assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT"


# =====================================================================
# Codec Tests
# =====================================================================


class TestCodec:
    """Exercises both wire codecs (JSON and msgpack) end to end."""

    @pytest.mark.asyncio
    async def test_json_codec_rpc(self, server_url: str):
        """JSON codec works for basic RPC."""
        # Build the transport by hand (instead of make_client) so the
        # codec can be chosen explicitly.
        transport = WebSocketClientTransport(
            ws_url=server_url,
            server_id="SERVER",
            codec=NaiveJsonCodec(),
        )
        client = RiverClient(transport, server_id="SERVER")
        try:
            result = await client.rpc("test", "add", {"n": 5})
            assert result["ok"] is
True
        finally:
            await transport.close()

    @pytest.mark.asyncio
    async def test_binary_codec_roundtrip(self):
        """Binary (msgpack) codec encodes and decodes transport messages."""
        from river.codec import BinaryCodec, CodecMessageAdapter
        from river.types import TransportMessage

        adapter = CodecMessageAdapter(BinaryCodec())
        msg = TransportMessage(
            id="test123",
            from_="client",
            to="server",
            seq=1,
            ack=0,
            payload={"data": "hello"},
            stream_id="s1",
            control_flags=0,
        )
        ok, buf = adapter.to_buffer(msg)
        assert ok is True
        ok, decoded = adapter.from_buffer(buf)
        assert ok is True
        assert decoded.payload == {"data": "hello"}


# =====================================================================
# Stream Unit Tests
# =====================================================================


class TestReadable:
    """Unit tests for Readable: push/close lifecycle, iteration, and
    the break_() semantics (no server involved)."""

    @pytest.mark.asyncio
    async def test_readable_close(self):
        """Closing a readable makes it done."""
        from river.streams import Readable

        r: Readable = Readable()
        r._trigger_close()
        assert r.is_closed()

    @pytest.mark.asyncio
    async def test_readable_iterate(self):
        """Can iterate over pushed values."""
        from river.streams import Readable

        r: Readable = Readable()
        r._push_value({"ok": True, "payload": 1})
        r._push_value({"ok": True, "payload": 2})
        r._trigger_close()

        results = await r.collect()
        assert len(results) == 2
        assert results[0]["payload"] == 1
        assert results[1]["payload"] == 2

    @pytest.mark.asyncio
    async def test_readable_push_after_close_raises(self):
        """Pushing to a closed readable raises."""
        from river.streams import Readable

        r: Readable = Readable()
        r._trigger_close()
        with pytest.raises(RuntimeError):
            r._push_value({"ok": True, "payload": 1})

    @pytest.mark.asyncio
    async def test_readable_double_close_raises(self):
        """Closing a readable twice raises."""
        from river.streams import Readable

        r: Readable = Readable()
        r._trigger_close()
        with pytest.raises(RuntimeError):
            r._trigger_close()

    @pytest.mark.asyncio
    async def test_readable_break(self):
        """Breaking a readable stops iteration."""
        from river.streams import Readable

        r: Readable = Readable()
        r._push_value({"ok": True, "payload": 1})
        r.break_()

        # break_() discards the queued value; the consumer sees a single
        # READABLE_BROKEN error result instead.
        results = await r.collect()
        assert len(results) == 1
        assert results[0]["ok"] is False
        assert results[0]["payload"]["code"] == "READABLE_BROKEN"

    @pytest.mark.asyncio
    async def test_readable_async_for(self):
        """Works with async for loop."""
        from river.streams import Readable

        r: Readable = Readable()
        r._push_value({"ok": True, "payload": "a"})
        r._push_value({"ok": True, "payload": "b"})
        r._trigger_close()

        values = []
        async for item in r:
            values.append(item)
        assert len(values) == 2


class TestWritable:
    """Unit tests for Writable: write/close callbacks and their
    idempotence (synchronous, no event loop required)."""

    def test_writable_write(self):
        """Write callback is invoked."""
        from river.streams import Writable

        received = []
        w: Writable = Writable(write_cb=received.append)
        w.write(1)
        w.write(2)
        assert received == [1, 2]

    def test_writable_close(self):
        """Close callback is invoked once."""
        from river.streams import Writable

        # List-cell counter so the lambda can mutate it.
        close_count = [0]
        w: Writable = Writable(
            write_cb=lambda x: None,
            close_cb=lambda: close_count.__setitem__(0, close_count[0] + 1),
        )
        assert w.is_writable()
        w.close()
        assert not w.is_writable()
        assert close_count[0] == 1

    def test_writable_idempotent_close(self):
        """Closing multiple times only invokes callback once."""
        from river.streams import Writable

        close_count = [0]
        w: Writable = Writable(
            write_cb=lambda x: None,
            close_cb=lambda: close_count.__setitem__(0, close_count[0] + 1),
        )
        w.close()
        w.close()
        w.close()
        assert close_count[0] == 1

    def test_writable_write_after_close_raises(self):
        """Writing after close raises."""
        from river.streams import Writable

        w: Writable = Writable(write_cb=lambda x: None)
        w.close()
        with pytest.raises(RuntimeError):
            w.write(42)

    def test_writable_close_with_value(self):
        """Close with a final value writes it before closing."""
        from river.streams import Writable

        received = []
        w: Writable = Writable(write_cb=received.append)
        w.close(42)
        assert received == [42]
        assert w.is_closed()


# =====================================================================
# Types Unit Tests
# =====================================================================


class TestTypes:
    """Unit tests for river.types: id generation, control-flag bit
    predicates, and TransportMessage dict round-tripping."""

    def test_generate_id_length(self):
        """Generated IDs are 12 characters."""
        from river.types import generate_id

        for _ in range(100):
            assert len(generate_id()) == 12

    def test_generate_id_unique(self):
        """Generated IDs are unique."""
        from river.types import generate_id

        ids = {generate_id() for _ in range(1000)}
        assert len(ids) == 1000

    def test_control_flags(self):
        """Control flag bit operations work correctly."""
        from river.types import (
            ControlFlags,
            is_ack,
            is_stream_open,
            is_stream_cancel,
            is_stream_close,
        )

        assert is_ack(ControlFlags.AckBit)
        assert not is_ack(0)
        assert is_stream_open(ControlFlags.StreamOpenBit)
        assert is_stream_close(ControlFlags.StreamClosedBit)
        assert is_stream_cancel(ControlFlags.StreamCancelBit)

        # Combined flags: each predicate tests only its own bit.
        combined = ControlFlags.StreamOpenBit | ControlFlags.StreamClosedBit
        assert is_stream_open(combined)
        assert is_stream_close(combined)
        assert not is_ack(combined)

    def test_transport_message_roundtrip(self):
        """TransportMessage serializes and deserializes correctly."""
        from river.types import TransportMessage

        msg = TransportMessage(
            id="test123",
            from_="client1",
            to="server1",
            seq=5,
            ack=3,
            payload={"data": "hello"},
            stream_id="stream1",
            control_flags=0,
            service_name="myService",
            procedure_name="myProc",
        )
        # to_dict maps python field names to wire names
        # (from_ -> "from", service_name -> "serviceName").
        d = msg.to_dict()
        assert d["from"] == "client1"
        assert d["to"] == "server1"
        assert d["serviceName"] == "myService"

        msg2 = TransportMessage.from_dict(d)
        assert msg2.from_ == "client1"
        assert msg2.seq == 5
        assert msg2.service_name == "myService"


# =====================================================================
# Codec Unit Tests
# =====================================================================


class TestCodecUnit:
    """Unit tests for the codecs and CodecMessageAdapter in isolation."""

    def test_json_codec_encode_decode(self):
        """JSON codec round-trips correctly."""
        from river.codec import NaiveJsonCodec

        codec = NaiveJsonCodec()
        obj = {"key": "value", "num": 42, "nested": {"a": [1, 2, 3]}}
        buf = codec.to_buffer(obj)
        assert isinstance(buf, bytes)
        result = codec.from_buffer(buf)
        assert result == obj

    def test_json_codec_bytes_handling(self):
        """JSON codec handles bytes via base64."""
        from river.codec import NaiveJsonCodec

        codec = NaiveJsonCodec()
        obj = {"data": b"\x00\x01\x02\xff"}
        buf = codec.to_buffer(obj)
        result = codec.from_buffer(buf)
        assert result["data"] == b"\x00\x01\x02\xff"

    def test_binary_codec_encode_decode(self):
        """Binary (msgpack) codec round-trips correctly."""
        from river.codec import BinaryCodec

        codec = BinaryCodec()
        obj = {"key": "value", "num": 42, "nested": {"a": [1, 2, 3]}}
        buf = codec.to_buffer(obj)
        assert isinstance(buf, bytes)
        result = codec.from_buffer(buf)
        assert result == obj

    def test_codec_adapter_valid(self):
        """CodecMessageAdapter encodes and decodes transport messages."""
        from river.codec import CodecMessageAdapter, NaiveJsonCodec
        from river.types import TransportMessage

        adapter = CodecMessageAdapter(NaiveJsonCodec())
        msg = TransportMessage(
            id="abc",
            from_="c1",
            to="s1",
            seq=0,
            ack=0,
            payload={"type": "ACK"},
            stream_id="heartbeat",
            control_flags=1,
        )
        ok, buf = adapter.to_buffer(msg)
        assert ok is True

        ok, result = adapter.from_buffer(buf)
        assert ok is True
        assert result.id == "abc"
        assert result.from_ == "c1"

    def test_codec_adapter_invalid_buffer(self):
        """CodecMessageAdapter returns error on invalid bytes."""
        from
river.codec import CodecMessageAdapter, NaiveJsonCodec

        adapter = CodecMessageAdapter(NaiveJsonCodec())
        # Adapter reports failure as (False, error_string) rather than raising.
        ok, result = adapter.from_buffer(b"not valid json")
        assert ok is False
        assert isinstance(result, str)
diff --git a/python-client/tests/test_server.mjs b/python-client/tests/test_server.mjs
new file mode 100644
index 00000000..e0a24f77
--- /dev/null
+++ b/python-client/tests/test_server.mjs
@@ -0,0 +1,291 @@
/**
 * Standalone test server for the Python River client test suite.
 * Uses the built dist/ output so it works with plain Node.js.
 *
 * Usage: node python-client/tests/test_server.mjs
 * (run from the river repo root after `npx tsup`)
 */
import http from 'node:http';
import { WebSocket, WebSocketServer } from 'ws';

// We import from the built output in dist/ (paths relative to river repo root)
import { createServer, createServiceSchema, Procedure, Ok, Err } from '../../dist/router/index.js';
import { WebSocketServerTransport } from '../../dist/transport/impls/ws/server.js';
import { Type } from '@sinclair/typebox';

const ServiceSchema = createServiceSchema();

// -------------------------------------------------------------------
// TestService
// -------------------------------------------------------------------
// NOTE: module-level state — `count` accumulates across every client
// for the lifetime of the server process.
let count = 0;

const TestServiceSchema = ServiceSchema.define({
  add: Procedure.rpc({
    requestInit: Type.Object({ n: Type.Number() }),
    responseData: Type.Object({ result: Type.Number() }),
    responseError: Type.Never(),
    async handler({ reqInit }) {
      count += reqInit.n;
      return Ok({ result: count });
    },
  }),
  echo: Procedure.stream({
    requestInit: Type.Object({}),
    requestData: Type.Object({
      msg: Type.String(),
      ignore: Type.Optional(Type.Boolean()),
    }),
    responseData: Type.Object({ response: Type.String() }),
    responseError: Type.Never(),
    async handler({ reqReadable, resWritable }) {
      for await (const result of reqReadable) {
        if (!result.ok) break;
        const val = result.payload;
        // Messages flagged `ignore` are consumed but never echoed.
        if (val.ignore) continue;
        resWritable.write(Ok({ response: val.msg }));
      }
      resWritable.close();
    },
  }),
  echoWithPrefix: Procedure.stream({
    requestInit: Type.Object({ prefix: Type.String() }),
    requestData: Type.Object({
      msg: Type.String(),
      ignore: Type.Optional(Type.Boolean()),
    }),
    responseData: Type.Object({ response: Type.String() }),
    responseError: Type.Never(),
    async handler({ reqInit, reqReadable, resWritable }) {
      for await (const result of reqReadable) {
        if (!result.ok) break;
        const val = result.payload;
        if (val.ignore) continue;
        resWritable.write(Ok({ response: `${reqInit.prefix} ${val.msg}` }));
      }
      resWritable.close();
    },
  }),
});

// -------------------------------------------------------------------
// OrderingService
// -------------------------------------------------------------------
// Records every `add` in arrival order so tests can assert delivery order.
const msgs = [];

const OrderingServiceSchema = ServiceSchema.define({
  add: Procedure.rpc({
    requestInit: Type.Object({ n: Type.Number() }),
    responseData: Type.Object({ n: Type.Number() }),
    responseError: Type.Never(),
    async handler({ reqInit }) {
      msgs.push(reqInit.n);
      return Ok({ n: reqInit.n });
    },
  }),
  getAll: Procedure.rpc({
    requestInit: Type.Object({}),
    responseData: Type.Object({ msgs: Type.Array(Type.Number()) }),
    responseError: Type.Never(),
    async handler() {
      // Copy so callers can't mutate the server-side record.
      return Ok({ msgs: [...msgs] });
    },
  }),
});

// -------------------------------------------------------------------
// FallibleService
// -------------------------------------------------------------------
const FallibleServiceSchema = ServiceSchema.define({
  divide: Procedure.rpc({
    requestInit: Type.Object({ a: Type.Number(), b: Type.Number() }),
    responseData: Type.Object({ result: Type.Number() }),
    responseError: Type.Union([
      Type.Object({
        code: Type.Literal('DIV_BY_ZERO'),
        message: Type.String(),
      }),
      Type.Object({
        code: Type.Literal('INFINITY'),
        message: Type.String(),
      }),
    ]),
    async handler({ reqInit }) {
      if (reqInit.b === 0) {
        return Err({ code: 'DIV_BY_ZERO', message: 'Cannot divide by zero' });
      }
      const result = reqInit.a / reqInit.b;
      if (!isFinite(result)) {
        return Err({ code: 'INFINITY', message: 'Result is infinity' });
      }
      return Ok({ result });
    },
  }),
  echo: Procedure.stream({
    requestInit: Type.Object({}),
    requestData: Type.Object({
      msg: Type.String(),
      throwResult: Type.Optional(Type.Boolean()),
      throwError: Type.Optional(Type.Boolean()),
    }),
    responseData: Type.Object({ response: Type.String() }),
    responseError: Type.Object({
      code: Type.Literal('STREAM_ERROR'),
      message: Type.String(),
    }),
    async handler({ reqReadable, resWritable }) {
      for await (const result of reqReadable) {
        if (!result.ok) break;
        const val = result.payload;
        if (val.throwError) {
          // Deliberately uncaught: exercises the client's UNCAUGHT_ERROR path.
          throw new Error('uncaught error');
        }
        if (val.throwResult) {
          // Declared service-level error; the stream itself stays open.
          resWritable.write(
            Err({ code: 'STREAM_ERROR', message: 'stream error' }),
          );
          continue;
        }
        resWritable.write(Ok({ response: val.msg }));
      }
      resWritable.close();
    },
  }),
});

// -------------------------------------------------------------------
// SubscribableService
// -------------------------------------------------------------------
let subCount = 0;
const subListeners = new Set();

const SubscribableServiceSchema = ServiceSchema.define({
  add: Procedure.rpc({
    requestInit: Type.Object({ n: Type.Number() }),
    responseData: Type.Object({ result: Type.Number() }),
    responseError: Type.Never(),
    async handler({ reqInit }) {
      subCount += reqInit.n;
      // Fan the new value out to every live subscription.
      for (const l of subListeners) l(subCount);
      return Ok({ result: subCount });
    },
  }),
  value: Procedure.subscription({
    requestInit: Type.Object({}),
    responseData: Type.Object({ count: Type.Number() }),
    responseError: Type.Never(),
    async handler({ resWritable, ctx }) {
      const listener = (val) => {
        resWritable.write(Ok({ count: val }));
      };
      // Push the current value immediately, then register for updates.
      resWritable.write(Ok({ count: subCount }));
      subListeners.add(listener);
      // Deregister and close when the subscription is aborted so the
      // listener set doesn't leak across tests.
      ctx.signal.addEventListener('abort', () => {
        subListeners.delete(listener);
        resWritable.close();
      });
    },
  }),
});

// -------------------------------------------------------------------
// UploadableService
// -------------------------------------------------------------------
const UploadableServiceSchema = ServiceSchema.define({
  addMultiple: Procedure.upload({
    requestInit: Type.Object({}),
    requestData: Type.Object({ n: Type.Number() }),
    responseData: Type.Object({ result: Type.Number() }),
    responseError: Type.Never(),
    async handler({ reqReadable }) {
      let total = 0;
      for await (const result of reqReadable) {
        if (!result.ok) break;
        total += result.payload.n;
      }
      return Ok({ result: total });
    },
  }),
  addMultipleWithPrefix: Procedure.upload({
    requestInit: Type.Object({ prefix: Type.String() }),
    requestData: Type.Object({ n: Type.Number() }),
    responseData: Type.Object({ result: Type.String() }),
    responseError: Type.Never(),
    async handler({ reqInit, reqReadable }) {
      let total = 0;
      for await (const result of reqReadable) {
        if (!result.ok) break;
        total += result.payload.n;
      }
      return Ok({ result: `${reqInit.prefix} ${total}` });
    },
  }),
  cancellableAdd: Procedure.upload({
    requestInit: Type.Object({}),
    requestData: Type.Object({ n: Type.Number() }),
    responseData: Type.Object({ result: Type.Number() }),
    responseError: Type.Object({
      code: Type.Literal('CANCEL'),
      message: Type.String(),
    }),
    async handler({ reqReadable, ctx }) {
      let total = 0;
      for await (const result of reqReadable) {
        if (!result.ok) break;
        total += result.payload.n;
        // Server-initiated cancel: once the running total reaches 10,
        // abort the stream and report CANCEL without waiting for close.
        if (total >= 10) {
          ctx.cancel();
          return Err({ code: 'CANCEL', message: 'total exceeds limit' });
        }
      }
      return Ok({ result: total });
    },
  }),
});

// -------------------------------------------------------------------
// Boot
// -------------------------------------------------------------------
const services =
{
  // Service name -> schema map; these names are what the Python tests
  // pass as the first argument to rpc/stream/upload/subscribe.
  test: TestServiceSchema,
  ordering: OrderingServiceSchema,
  fallible: FallibleServiceSchema,
  subscribable: SubscribableServiceSchema,
  uploadable: UploadableServiceSchema,
};

async function main() {
  const httpServer = http.createServer();
  // Listen on an OS-assigned port (0) so parallel test runs don't collide.
  // NOTE(review): no 'error' listener is attached, so a bind failure would
  // leave this promise unsettled — confirm whether that can happen in CI.
  const port = await new Promise((resolve, reject) => {
    httpServer.listen(0, '127.0.0.1', () => {
      const addr = httpServer.address();
      if (typeof addr === 'object' && addr) resolve(addr.port);
      else reject(new Error("couldn't get port"));
    });
  });

  const wss = new WebSocketServer({ server: httpServer });
  const serverTransport = new WebSocketServerTransport(wss, 'SERVER');
  const _server = createServer(serverTransport, services);

  // Print port so the Python test can parse it
  process.stdout.write(`RIVER_PORT=${port}\n`);

  // Graceful shutdown: close the River server before the HTTP listener.
  process.on('SIGTERM', () => {
    _server.close().then(() => {
      httpServer.close();
      process.exit(0);
    });
  });
  process.on('SIGINT', () => {
    _server.close().then(() => {
      httpServer.close();
      process.exit(0);
    });
  });
}

main().catch((err) => {
  console.error('Failed to start test server:', err);
  process.exit(1);
});
From 0da3a59567d491b29444368fdf5a26515580f3c3 Mon Sep 17 00:00:00 2001
From: Bri <34875062+Monkatraz@users.noreply.github.com>
Date: Thu, 19 Feb 2026 09:20:46 +0000
Subject: [PATCH 02/29] refactor: use esbuild to bundle test server from .ts
 source

Replace the hand-written .mjs test server with the original .ts source.
conftest.py now runs esbuild at test session start to bundle
test_server.ts -> test_server.mjs (gitignored build artifact).

This avoids tsx/ts-node runtime module resolution issues with the river
repo's bundler-style tsconfig while keeping the test server as authored
TypeScript.
--- python-client/.gitignore | 2 + python-client/tests/conftest.py | 57 +++++++++++++---- .../tests/{test_server.mjs => test_server.ts} | 64 ++++++++++++------- 3 files changed, 88 insertions(+), 35 deletions(-) rename python-client/tests/{test_server.mjs => test_server.ts} (84%) diff --git a/python-client/.gitignore b/python-client/.gitignore index cba61eb6..befa7815 100644 --- a/python-client/.gitignore +++ b/python-client/.gitignore @@ -5,3 +5,5 @@ __pycache__/ .pytest_cache/ dist/ build/ +# esbuild build artifact (built from test_server.ts at test time) +tests/test_server.mjs diff --git a/python-client/tests/conftest.py b/python-client/tests/conftest.py index 98944317..4299e9d2 100644 --- a/python-client/tests/conftest.py +++ b/python-client/tests/conftest.py @@ -11,16 +11,46 @@ import re import signal import subprocess -import sys import time -from typing import AsyncGenerator, Generator +from typing import Generator import pytest -import pytest_asyncio -SERVER_SCRIPT = os.path.join(os.path.dirname(__file__), "test_server.mjs") -RIVER_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) +TESTS_DIR = os.path.dirname(__file__) +SERVER_TS = os.path.join(TESTS_DIR, "test_server.ts") +SERVER_MJS = os.path.join(TESTS_DIR, "test_server.mjs") +RIVER_ROOT = os.path.abspath(os.path.join(TESTS_DIR, "..", "..")) +ESBUILD = os.path.join(RIVER_ROOT, "node_modules", ".bin", "esbuild") + + +def _build_test_server() -> None: + """Bundle test_server.ts -> test_server.mjs using esbuild. + + esbuild handles the river repo's bundler-style module resolution at + build time, producing a single ESM file that plain ``node`` can run. 
+ """ + result = subprocess.run( + [ + ESBUILD, + SERVER_TS, + "--bundle", + "--platform=node", + "--format=esm", + f"--outfile={SERVER_MJS}", + # keep heavy deps external so the bundle stays small and + # we reuse whatever is already in node_modules + "--external:ws", + "--external:@sinclair/typebox", + ], + cwd=RIVER_ROOT, + capture_output=True, + text=True, + ) + if result.returncode != 0: + raise RuntimeError( + f"esbuild failed ({result.returncode}):\n{result.stderr}" + ) @pytest.fixture(scope="session") @@ -33,12 +63,15 @@ def event_loop(): @pytest.fixture(scope="session") def river_server_port() -> Generator[int, None, None]: - """Start the TypeScript test server and return the port it listens on. + """Build and start the TypeScript test server, yield its port. - The server is started once for the entire test session and killed afterward. + The server is built once via esbuild and kept alive for the entire + test session. """ + _build_test_server() + proc = subprocess.Popen( - ["node", SERVER_SCRIPT], + ["node", SERVER_MJS], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=RIVER_ROOT, @@ -46,14 +79,15 @@ def river_server_port() -> Generator[int, None, None]: # Wait for the server to print the port port = None - deadline = time.monotonic() + 30 # 30s timeout + deadline = time.monotonic() + 30 assert proc.stdout is not None while time.monotonic() < deadline: line = proc.stdout.readline().decode("utf-8").strip() if not line: - # Check if process died if proc.poll() is not None: - stderr = proc.stderr.read().decode("utf-8") if proc.stderr else "" + stderr = ( + proc.stderr.read().decode("utf-8") if proc.stderr else "" + ) raise RuntimeError( f"Test server exited with code {proc.returncode}.\n" f"stderr: {stderr}" @@ -71,7 +105,6 @@ def river_server_port() -> Generator[int, None, None]: yield port - # Cleanup: terminate the server proc.send_signal(signal.SIGTERM) try: proc.wait(timeout=5) diff --git a/python-client/tests/test_server.mjs 
b/python-client/tests/test_server.ts
similarity index 84%
rename from python-client/tests/test_server.mjs
rename to python-client/tests/test_server.ts
index e0a24f77..5f13bf69 100644
--- a/python-client/tests/test_server.mjs
+++ b/python-client/tests/test_server.ts
@@ -1,22 +1,28 @@
 /**
  * Standalone test server for the Python River client test suite.
- * Uses the built dist/ output so it works with plain Node.js.
  *
- * Usage: node python-client/tests/test_server.mjs
- * (run from the river repo root after `npx tsup`)
+ * Starts a WebSocket server with the standard test services and prints
+ * the port to stdout so the Python test harness can connect.
+ *
+ * Usage: bundled to tests/test_server.mjs by esbuild at test-session
+ *   start (see conftest.py), then run with plain `node`.
  */
 import http from 'node:http';
-import { WebSocket, WebSocketServer } from 'ws';
-
-// We import from the built output in dist/ (paths relative to river repo root)
-import { createServer, createServiceSchema, Procedure, Ok, Err } from '../../dist/router/index.js';
-import { WebSocketServerTransport } from '../../dist/transport/impls/ws/server.js';
+import { WebSocketServer } from 'ws';
+import { WebSocketServerTransport } from '../../transport/impls/ws/server';
+import {
+  createServer,
+  createServiceSchema,
+  Procedure,
+  Ok,
+  Err,
+} from '../../router';
 import { Type } from '@sinclair/typebox';
 
 const ServiceSchema = createServiceSchema();
 
 // -------------------------------------------------------------------
-// TestService
+// TestService – mirrors the TS TestServiceSchema
 // -------------------------------------------------------------------
 let count = 0;
 
@@ -69,9 +75,9 @@ const TestServiceSchema = ServiceSchema.define({
 });
 
 // -------------------------------------------------------------------
-// OrderingService
+// OrderingService – for message ordering tests
 // -------------------------------------------------------------------
-const msgs = [];
+const msgs: number[] = [];
 
 const
OrderingServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ @@ -94,7 +100,7 @@ const OrderingServiceSchema = ServiceSchema.define({ }); // ------------------------------------------------------------------- -// FallibleService +// FallibleService – service-level errors // ------------------------------------------------------------------- const FallibleServiceSchema = ServiceSchema.define({ divide: Procedure.rpc({ @@ -112,11 +118,17 @@ const FallibleServiceSchema = ServiceSchema.define({ ]), async handler({ reqInit }) { if (reqInit.b === 0) { - return Err({ code: 'DIV_BY_ZERO', message: 'Cannot divide by zero' }); + return Err({ + code: 'DIV_BY_ZERO' as const, + message: 'Cannot divide by zero', + }); } const result = reqInit.a / reqInit.b; if (!isFinite(result)) { - return Err({ code: 'INFINITY', message: 'Result is infinity' }); + return Err({ + code: 'INFINITY' as const, + message: 'Result is infinity', + }); } return Ok({ result }); }, @@ -142,7 +154,7 @@ const FallibleServiceSchema = ServiceSchema.define({ } if (val.throwResult) { resWritable.write( - Err({ code: 'STREAM_ERROR', message: 'stream error' }), + Err({ code: 'STREAM_ERROR' as const, message: 'stream error' }), ); continue; } @@ -154,10 +166,11 @@ const FallibleServiceSchema = ServiceSchema.define({ }); // ------------------------------------------------------------------- -// SubscribableService +// SubscribableService – subscriptions // ------------------------------------------------------------------- let subCount = 0; -const subListeners = new Set(); +type SubListener = (val: number) => void; +const subListeners = new Set(); const SubscribableServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ @@ -175,9 +188,10 @@ const SubscribableServiceSchema = ServiceSchema.define({ responseData: Type.Object({ count: Type.Number() }), responseError: Type.Never(), async handler({ resWritable, ctx }) { - const listener = (val) => { + const listener: SubListener = (val) => { 
resWritable.write(Ok({ count: val })); }; + // Send initial value resWritable.write(Ok({ count: subCount })); subListeners.add(listener); ctx.signal.addEventListener('abort', () => { @@ -189,7 +203,7 @@ const SubscribableServiceSchema = ServiceSchema.define({ }); // ------------------------------------------------------------------- -// UploadableService +// UploadableService – uploads // ------------------------------------------------------------------- const UploadableServiceSchema = ServiceSchema.define({ addMultiple: Procedure.upload({ @@ -235,7 +249,10 @@ const UploadableServiceSchema = ServiceSchema.define({ total += result.payload.n; if (total >= 10) { ctx.cancel(); - return Err({ code: 'CANCEL', message: 'total exceeds limit' }); + return Err({ + code: 'CANCEL' as const, + message: 'total exceeds limit', + }); } } return Ok({ result: total }); @@ -244,7 +261,7 @@ const UploadableServiceSchema = ServiceSchema.define({ }); // ------------------------------------------------------------------- -// Boot +// Boot the server // ------------------------------------------------------------------- const services = { test: TestServiceSchema, @@ -256,7 +273,7 @@ const services = { async function main() { const httpServer = http.createServer(); - const port = await new Promise((resolve, reject) => { + const port = await new Promise((resolve, reject) => { httpServer.listen(0, '127.0.0.1', () => { const addr = httpServer.address(); if (typeof addr === 'object' && addr) resolve(addr.port); @@ -268,9 +285,10 @@ async function main() { const serverTransport = new WebSocketServerTransport(wss, 'SERVER'); const _server = createServer(serverTransport, services); - // Print port so the Python test can parse it + // Signal that the server is ready by printing the port process.stdout.write(`RIVER_PORT=${port}\n`); + // Keep the server alive process.on('SIGTERM', () => { _server.close().then(() => { httpServer.close(); From 5d2c62efc68cb2ff69597f4c28217fc893c8890f Mon Sep 17 
00:00:00 2001 From: Bri <34875062+Monkatraz@users.noreply.github.com> Date: Thu, 19 Feb 2026 09:41:51 +0000 Subject: [PATCH 03/29] test: expand test suite to 65 tests with cancellation, idempotent close, and Readable edge cases - Add CancellationService to test server with blocking + immediate handlers - Add client-initiated cancellation tests for all 4 proc types (rpc, stream, upload, subscription) - Add idempotent close tests (stream, subscription) and cancellation-after-transport-close - Add eagerly-connect E2E test - Add 17 new Readable unit tests: locking semantics, eager iteration, collect-waits-for-close, break variants - Fix Readable to enforce locking (TypeError on double __aiter__/collect) - Replace async generator iterator with _ReadableIterator class for synchronous break cleanup - Fix break() to be no-op when stream already closed with empty queue --- python-client/river/streams.py | 76 +++- python-client/tests/test_e2e.py | 543 ++++++++++++++++++++++++++++- python-client/tests/test_server.ts | 115 ++++++ 3 files changed, 721 insertions(+), 13 deletions(-) diff --git a/python-client/river/streams.py b/python-client/river/streams.py index 03bfd538..cbca9601 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -55,20 +55,35 @@ def is_closed(self) -> bool: """Whether the stream has been closed.""" return self._closed and len(self._queue) == 0 + def _has_values_in_queue(self) -> bool: + """Whether there are buffered values waiting to be consumed.""" + return len(self._queue) > 0 + def break_(self) -> None: - """Break the stream, discarding all queued values.""" + """Break the stream, discarding all queued values. + + If the stream is already closed and the queue is empty, + this is a no-op (the stream is already done). 
+ """ if self._locked and self._broken: return - self._broken = True self._locked = True + # If stream is already done (closed + empty), don't signal broken + if self._closed and len(self._queue) == 0: + self._notify_waiters() + return + self._broken = True self._queue.clear() self._notify_waiters() async def collect(self) -> list[T]: """Consume all values from the stream until it closes. - Locks the stream. Returns the list of all values. + Locks the stream. Raises TypeError if already locked. + Returns the list of all values. """ + if self._locked: + raise TypeError("Readable is already locked") self._locked = True results: list[T] = [] async for item in self._iterate(): @@ -107,12 +122,59 @@ async def _iterate(self): await fut def __aiter__(self): + if self._locked: + raise TypeError("Readable is already locked") self._locked = True - return self._async_iter_impl() + return _ReadableIterator(self) - async def _async_iter_impl(self): - async for item in self._iterate(): - yield item + +class _ReadableIterator: + """Async iterator for Readable that cleans up on break/close. + + Unlike an async generator, this class handles ``__del__`` + synchronously, ensuring the queue is cleared when a for-await + loop breaks out. 
+ """ + + def __init__(self, readable: Readable) -> None: + self._readable = readable + self._done = False + + def __aiter__(self): + return self + + async def __anext__(self): + if self._done: + raise StopAsyncIteration + + r = self._readable + while True: + if r._broken: + val = { + "ok": False, + "payload": { + "code": "READABLE_BROKEN", + "message": "stream was broken", + }, + } + # After yielding the broken error, the iterator is done + self._done = True + return val + + if r._queue: + return r._queue.pop(0) + + if r._closed: + raise StopAsyncIteration + + loop = asyncio.get_event_loop() + fut: asyncio.Future[None] = loop.create_future() + r._waiters.append(fut) + await fut + + def __del__(self): + # Synchronous cleanup when the iterator is GC'd (e.g. break in for-await) + self._readable._queue.clear() class Writable(Generic[T]): diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index fe523150..de5ae8b5 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -370,6 +370,233 @@ async def test_subscription_on_closed_transport(self, server_url: str): assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT" +# ===================================================================== +# Client-Initiated Cancellation Tests +# ===================================================================== + + +class TestClientCancellation: + """Tests for client-initiated cancellation via abort signal. + + Uses the cancel.blocking* handlers on the test server which never resolve, + allowing us to test that the client abort properly sends CANCEL and + receives the CANCEL result. 
+ """ + + @pytest.mark.asyncio + async def test_cancel_rpc(self, server_url: str): + """Client abort on RPC returns CANCEL error.""" + client = await make_client(server_url) + try: + abort_evt = asyncio.Event() + + async def do_abort(): + await asyncio.sleep(0.2) + abort_evt.set() + + asyncio.ensure_future(do_abort()) + result = await client.rpc( + "cancel", "blockingRpc", {}, abort_signal=abort_evt + ) + assert result["ok"] is False + assert result["payload"]["code"] == "CANCEL" + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def test_cancel_stream(self, server_url: str): + """Client abort on stream returns CANCEL error.""" + client = await make_client(server_url) + try: + abort_evt = asyncio.Event() + stream = client.stream( + "cancel", "blockingStream", {}, abort_signal=abort_evt + ) + # Give server time to receive and process the init message + await asyncio.sleep(0.2) + abort_evt.set() + await asyncio.sleep(0) + + results = await stream.res_readable.collect() + assert len(results) == 1 + assert results[0]["ok"] is False + assert results[0]["payload"]["code"] == "CANCEL" + assert not stream.req_writable.is_writable() + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def test_cancel_upload(self, server_url: str): + """Client abort on upload returns CANCEL error.""" + client = await make_client(server_url) + try: + abort_evt = asyncio.Event() + upload = client.upload( + "cancel", "blockingUpload", {}, abort_signal=abort_evt + ) + # Give server time to receive + await asyncio.sleep(0.2) + abort_evt.set() + + result = await upload.finalize() + assert result["ok"] is False + assert result["payload"]["code"] == "CANCEL" + assert not upload.req_writable.is_writable() + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def test_cancel_subscription(self, server_url: str): + """Client abort on subscription returns CANCEL error.""" + client = await make_client(server_url) + try: + abort_evt = 
asyncio.Event() + sub = client.subscribe( + "cancel", "blockingSubscription", {}, abort_signal=abort_evt + ) + # Give server time to receive + await asyncio.sleep(0.2) + abort_evt.set() + await asyncio.sleep(0) + + done, msg = await sub.res_readable.next() + assert not done + assert msg["ok"] is False + assert msg["payload"]["code"] == "CANCEL" + finally: + await cleanup_client(client) + + +# ===================================================================== +# Idempotent Close / Post-Close Safety Tests +# ===================================================================== + + +class TestIdempotentClose: + """Tests that closing/aborting after completion is a safe no-op.""" + + @pytest.mark.asyncio + async def test_stream_idempotent_close(self, server_url: str): + """Closing and aborting a stream after it finished is safe.""" + client = await make_client(server_url) + try: + abort_evt = asyncio.Event() + stream = client.stream( + "test", "echo", {}, abort_signal=abort_evt + ) + stream.req_writable.write({"msg": "abc", "ignore": False}) + stream.req_writable.close() + + done, msg = await stream.res_readable.next() + assert not done + assert msg["ok"] is True + assert msg["payload"]["response"] == "abc" + + # Wait for server close to be received + await asyncio.sleep(0.1) + + # Abort after stream completed - should be a no-op + abort_evt.set() + await asyncio.sleep(0.05) + + # Drain any remaining messages - should be done or at most a cancel + done, val = await stream.res_readable.next() + # Either the stream is done, or we got a cancel (both ok) + if not done: + assert val["ok"] is False + + # "Accidentally" close again - no crash + stream.req_writable.close() + abort_evt.set() + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def test_subscription_idempotent_close(self, server_url: str): + """Aborting a subscription after it was already aborted is safe.""" + client = await make_client(server_url) + try: + abort_evt = asyncio.Event() + 
sub = client.subscribe( + "subscribable", "value", {}, abort_signal=abort_evt + ) + # Read initial value + done, msg = await sub.res_readable.next() + assert not done + assert msg["ok"] is True + + # Abort + abort_evt.set() + await asyncio.sleep(0.05) + + # Read the cancel + done, msg = await sub.res_readable.next() + assert not done + assert msg["ok"] is False + assert msg["payload"]["code"] == "CANCEL" + + # "Accidentally" abort again + abort_evt.set() + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def test_cancellation_after_transport_close(self, server_url: str): + """Closing/aborting after transport close doesn't crash.""" + client = await make_client(server_url) + try: + abort_evt = asyncio.Event() + stream = client.stream( + "test", "echo", {}, abort_signal=abort_evt + ) + stream.req_writable.write({"msg": "1", "ignore": False}) + done, msg = await stream.res_readable.next() + assert not done + assert msg["payload"]["response"] == "1" + + # Close the transport + await client.transport.close() + await asyncio.sleep(0.05) + + # Closing writable after transport close should be safe + stream.req_writable.close() + # Aborting after transport close should be safe + abort_evt.set() + await asyncio.sleep(0.05) + # No crash = success + finally: + # Transport already closed + pass + + +# ===================================================================== +# Eagerly Connect Test +# ===================================================================== + + +class TestEagerConnect: + @pytest.mark.asyncio + async def test_eagerly_connect(self, server_url: str): + """eagerlyConnect creates a connection before any procedure call.""" + transport = WebSocketClientTransport( + ws_url=server_url, + server_id="SERVER", + codec=NaiveJsonCodec(), + eagerly_connect=True, + ) + client = RiverClient(transport, server_id="SERVER", eagerly_connect=True) + try: + # Wait for the connection to be established + await asyncio.sleep(0.5) + # Should have a session 
now + assert len(transport.sessions) > 0 + # Verify the connection works by making a call + result = await client.rpc("test", "add", {"n": 1}) + assert result["ok"] is True + finally: + await transport.close() + + # ===================================================================== # Codec Tests # ===================================================================== @@ -467,17 +694,21 @@ async def test_readable_double_close_raises(self): @pytest.mark.asyncio async def test_readable_break(self): - """Breaking a readable stops iteration.""" + """Breaking a readable yields broken error on next read.""" from river.streams import Readable r: Readable = Readable() r._push_value({"ok": True, "payload": 1}) + # Grab iterator before break (since break locks the stream) + done, val = await r.next() + assert not done + assert val["payload"] == 1 r.break_() - - results = await r.collect() - assert len(results) == 1 - assert results[0]["ok"] is False - assert results[0]["payload"]["code"] == "READABLE_BROKEN" + done, val = await r.next() + assert not done + assert val["ok"] is False + assert val["payload"]["code"] == "READABLE_BROKEN" + r._trigger_close() @pytest.mark.asyncio async def test_readable_async_for(self): @@ -628,6 +859,306 @@ def test_transport_message_roundtrip(self): # ===================================================================== +class TestReadableLocking: + """Tests for Readable stream locking semantics (mirrors TS streams.test.ts).""" + + @pytest.mark.asyncio + async def test_lock_on_aiter(self): + """__aiter__ locks the stream; second call raises TypeError.""" + from river.streams import Readable + + r: Readable = Readable() + r.__aiter__() + assert not r.is_readable() + with pytest.raises(TypeError): + r.__aiter__() + r._trigger_close() + + @pytest.mark.asyncio + async def test_lock_on_collect(self): + """collect() locks the stream; __aiter__ raises TypeError.""" + from river.streams import Readable + + r: Readable = Readable() + # Don't await - 
just start collect (it will block waiting for close) + collect_task = asyncio.ensure_future(r.collect()) + await asyncio.sleep(0) # yield to let collect start + assert not r.is_readable() + with pytest.raises(TypeError): + r.__aiter__() + r._trigger_close() + await collect_task + + @pytest.mark.asyncio + async def test_lock_on_break(self): + """break_() locks the stream; __aiter__ raises TypeError.""" + from river.streams import Readable + + r: Readable = Readable() + r.break_() + assert not r.is_readable() + with pytest.raises(TypeError): + r.__aiter__() + r._trigger_close() + + @pytest.mark.asyncio + async def test_raw_iter_from_aiter(self): + """Can use the raw iterator from __aiter__.""" + from river.streams import Readable + + r: Readable = Readable() + it = r.__aiter__() + next_p = it.__anext__() + r._push_value({"ok": True, "payload": 1}) + val = await next_p + assert val == {"ok": True, "payload": 1} + next_p2 = it.__anext__() + r._trigger_close() + with pytest.raises(StopAsyncIteration): + await next_p2 + + +class TestReadableIteration: + """Tests for Readable iteration edge cases (mirrors TS streams.test.ts).""" + + @pytest.mark.asyncio + async def test_values_pushed_before_close(self): + """Can iterate values that were pushed before close.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + r._push_value({"ok": True, "payload": 2}) + r._push_value({"ok": True, "payload": 3}) + r._trigger_close() + done, val = await r.next() + assert not done and val["payload"] == 1 + done, val = await r.next() + assert not done and val["payload"] == 2 + done, val = await r.next() + assert not done and val["payload"] == 3 + done, val = await r.next() + assert done + + @pytest.mark.asyncio + async def test_eager_iteration(self): + """Read before push resolves in order.""" + from river.streams import Readable + + r: Readable = Readable() + # Start reading before values are pushed + t1 = 
asyncio.ensure_future(r.next()) + t2 = asyncio.ensure_future(r.next()) + # Give tasks a chance to start waiting + await asyncio.sleep(0) + r._push_value({"ok": True, "payload": 1}) + r._push_value({"ok": True, "payload": 2}) + done1, val1 = await t1 + done2, val2 = await t2 + assert not done1 and val1["payload"] == 1 + assert not done2 and val2["payload"] == 2 + # Third read + close + t3 = asyncio.ensure_future(r.next()) + await asyncio.sleep(0) + r._push_value({"ok": True, "payload": 3}) + r._trigger_close() + done3, val3 = await t3 + assert not done3 and val3["payload"] == 3 + done4, _ = await r.next() + assert done4 + + @pytest.mark.asyncio + async def test_not_resolve_until_push(self): + """Pending next() doesn't resolve until push or close.""" + from river.streams import Readable + + r: Readable = Readable() + next_p = asyncio.ensure_future(r.next()) + # Should not resolve yet + result = await asyncio.wait_for( + asyncio.shield(next_p), timeout=0.01 + ) if False else None + done = next_p.done() + assert not done, "next() should not resolve before push" + + r._push_value({"ok": True, "payload": 1}) + await asyncio.sleep(0) + done_v, val = await next_p + assert not done_v and val["payload"] == 1 + + # isDone should not resolve until close + done_p = asyncio.ensure_future(r.next()) + await asyncio.sleep(0.01) + assert not done_p.done(), "next() should not resolve before close" + r._trigger_close() + done_v2, _ = await done_p + assert done_v2 + + @pytest.mark.asyncio + async def test_collect_after_close(self): + """collect() returns all values when called after close.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + r._push_value({"ok": True, "payload": 2}) + r._push_value({"ok": True, "payload": 3}) + r._trigger_close() + results = await r.collect() + assert len(results) == 3 + assert [v["payload"] for v in results] == [1, 2, 3] + + @pytest.mark.asyncio + async def 
test_collect_waits_for_close(self): + """collect() doesn't resolve until the stream is closed.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + collect_task = asyncio.ensure_future(r.collect()) + r._push_value({"ok": True, "payload": 2}) + r._push_value({"ok": True, "payload": 3}) + await asyncio.sleep(0.01) + assert not collect_task.done(), "collect should not resolve before close" + r._push_value({"ok": True, "payload": 4}) + r._trigger_close() + results = await collect_task + assert len(results) == 4 + assert [v["payload"] for v in results] == [1, 2, 3, 4] + + @pytest.mark.asyncio + async def test_async_for_with_break(self): + """Breaking out of async for mid-stream stops iteration.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + r._push_value({"ok": True, "payload": 2}) + assert r._has_values_in_queue() + values = [] + async for item in r: + values.append(item) + assert r._has_values_in_queue() + break + # After break, remaining values should be discarded (broken) + assert not r._has_values_in_queue() + + @pytest.mark.asyncio + async def test_error_results_in_iteration(self): + """Error results are yielded as part of iteration.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + r._push_value({"ok": True, "payload": 2}) + r._push_value( + {"ok": False, "payload": {"code": "SOME_ERROR", "message": "err"}} + ) + r._trigger_close() + results = [] + async for item in r: + results.append(item) + assert len(results) == 3 + assert results[0]["ok"] is True + assert results[1]["ok"] is True + assert results[2]["ok"] is False + assert results[2]["payload"]["code"] == "SOME_ERROR" + + +class TestReadableBreakVariants: + """Tests for Readable break() edge cases (mirrors TS streams.test.ts).""" + + @pytest.mark.asyncio + async def test_break_signals_next(self): + """break() signals 
the next read call.""" + from river.streams import Readable + + r: Readable = Readable() + r.break_() + done, val = await r.next() + assert not done + assert val["ok"] is False + assert val["payload"]["code"] == "READABLE_BROKEN" + r._trigger_close() + + @pytest.mark.asyncio + async def test_break_signals_pending(self): + """break() signals a pending read.""" + from river.streams import Readable + + r: Readable = Readable() + pending = asyncio.ensure_future(r.next()) + await asyncio.sleep(0) + r.break_() + done, val = await pending + assert not done + assert val["ok"] is False + assert val["payload"]["code"] == "READABLE_BROKEN" + r._trigger_close() + + @pytest.mark.asyncio + async def test_break_with_queued_value(self): + """break() clears queue and yields broken error.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + assert r._has_values_in_queue() + r.break_() + assert not r._has_values_in_queue() + done, val = await r.next() + assert not done + assert val["payload"]["code"] == "READABLE_BROKEN" + r._trigger_close() + + @pytest.mark.asyncio + async def test_break_with_queued_value_after_close(self): + """break() after close with queued values still yields broken error.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + r._trigger_close() + r.break_() + done, val = await r.next() + assert not done + assert val["payload"]["code"] == "READABLE_BROKEN" + + @pytest.mark.asyncio + async def test_break_empty_queue_after_close(self): + """break() after close with empty queue -> done.""" + from river.streams import Readable + + r: Readable = Readable() + r._trigger_close() + r.break_() + done, _ = await r.next() + assert done + + @pytest.mark.asyncio + async def test_break_ends_iteration_midstream(self): + """break() during async for ends iteration.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": 
True, "payload": 1}) + r._push_value({"ok": True, "payload": 2}) + r._push_value({"ok": True, "payload": 3}) + + results = [] + i = 0 + async for item in r: + if i == 0: + assert item["payload"] == 1 + r.break_() + elif i == 1: + assert item["ok"] is False + assert item["payload"]["code"] == "READABLE_BROKEN" + results.append(item) + i += 1 + assert i == 2 + + class TestCodecUnit: def test_json_codec_encode_decode(self): """JSON codec round-trips correctly.""" diff --git a/python-client/tests/test_server.ts b/python-client/tests/test_server.ts index 5f13bf69..4f5ab982 100644 --- a/python-client/tests/test_server.ts +++ b/python-client/tests/test_server.ts @@ -260,6 +260,120 @@ const UploadableServiceSchema = ServiceSchema.define({ }), }); +// ------------------------------------------------------------------- +// CancellationService – handlers that block forever for cancel tests +// ------------------------------------------------------------------- +const CancellationServiceSchema = ServiceSchema.define({ + blockingRpc: Procedure.rpc({ + requestInit: Type.Object({}), + responseData: Type.Object({}), + responseError: Type.Never(), + async handler({ ctx }) { + // Block until cancelled + return new Promise((resolve) => { + ctx.signal.addEventListener('abort', () => { + // Handler will be cancelled by the framework, nothing to resolve + }); + }); + }, + }), + blockingStream: Procedure.stream({ + requestInit: Type.Object({}), + requestData: Type.Object({}), + responseData: Type.Object({}), + responseError: Type.Never(), + async handler({ ctx }) { + return new Promise(() => { + // never resolves + }); + }, + }), + blockingUpload: Procedure.upload({ + requestInit: Type.Object({}), + requestData: Type.Object({}), + responseData: Type.Object({}), + responseError: Type.Never(), + async handler({ ctx }) { + return new Promise(() => { + // never resolves + }); + }, + }), + blockingSubscription: Procedure.subscription({ + requestInit: Type.Object({}), + responseData: 
Type.Object({}), + responseError: Type.Never(), + async handler({ ctx }) { + return new Promise(() => { + // never resolves + }); + }, + }), + // RPC that resolves normally (for clean handler cancellation) + immediateRpc: Procedure.rpc({ + requestInit: Type.Object({}), + responseData: Type.Object({ done: Type.Boolean() }), + responseError: Type.Never(), + async handler() { + return Ok({ done: true }); + }, + }), + // Stream that writes one response and closes (for clean handler cancel) + immediateStream: Procedure.stream({ + requestInit: Type.Object({}), + requestData: Type.Object({}), + responseData: Type.Object({ done: Type.Boolean() }), + responseError: Type.Never(), + async handler({ reqReadable, resWritable }) { + resWritable.write(Ok({ done: true })); + for await (const result of reqReadable) { + if (!result.ok) break; + } + resWritable.close(); + }, + }), + // Upload that resolves immediately + immediateUpload: Procedure.upload({ + requestInit: Type.Object({}), + requestData: Type.Object({}), + responseData: Type.Object({ done: Type.Boolean() }), + responseError: Type.Never(), + async handler({ reqReadable }) { + for await (const result of reqReadable) { + if (!result.ok) break; + } + return Ok({ done: true }); + }, + }), + // Subscription that closes immediately + immediateSubscription: Procedure.subscription({ + requestInit: Type.Object({}), + responseData: Type.Object({ done: Type.Boolean() }), + responseError: Type.Never(), + async handler({ resWritable }) { + resWritable.write(Ok({ done: true })); + resWritable.close(); + }, + }), + // Stream that sends N responses then closes (for idempotent close tests) + countedStream: Procedure.stream({ + requestInit: Type.Object({ total: Type.Number() }), + requestData: Type.Object({}), + responseData: Type.Object({ i: Type.Number() }), + responseError: Type.Never(), + async handler({ reqInit, reqReadable, resWritable }) { + for (let i = 0; i < reqInit.total; i++) { + resWritable.write(Ok({ i })); + } + // Wait for 
client to close the request stream + for await (const result of reqReadable) { + if (!result.ok) break; + } + resWritable.close(); + }, + }), +}); + // ------------------------------------------------------------------- // Boot the server // ------------------------------------------------------------------- @@ -269,6 +383,7 @@ const services = { fallible: FallibleServiceSchema, subscribable: SubscribableServiceSchema, uploadable: UploadableServiceSchema, + cancel: CancellationServiceSchema, }; async function main() { From 52a9e20ab5f7707ebc77524c9ecfab488ebeead0 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 14:23:56 -0800 Subject: [PATCH 04/29] ci properly --- .github/workflows/ci.yml | 50 +++++++++++++ .prettierignore | 3 + python-client/pyproject.toml | 7 ++ python-client/river/__init__.py | 6 +- python-client/river/client.py | 33 +++----- python-client/river/codec.py | 2 +- python-client/river/session.py | 13 +--- python-client/river/streams.py | 10 ++- python-client/river/transport.py | 59 ++++----------- python-client/river/types.py | 5 +- python-client/tests/conftest.py | 12 +-- python-client/tests/test_e2e.py | 116 +++++++++++++++++++++++------ python-client/tests/test_server.ts | 34 ++++++--- 13 files changed, 225 insertions(+), 125 deletions(-) create mode 100644 .github/workflows/ci.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..cef50ef4 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,50 @@ +name: Build and Test + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + build-and-test: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, macos-latest] + steps: + - uses: actions/checkout@v4 + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: Install Node dependencies + run: npm ci + + - name: TypeScript check + run: npm run check + + - name: TypeScript tests + run: npm run test:single + + - 
name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install Python dependencies + working-directory: python-client + run: pip install -e ".[dev]" + + - name: Python lint + working-directory: python-client + run: | + pip install ruff + ruff check . + ruff format --check . + + - name: Python tests + working-directory: python-client + run: python -m pytest tests/ -v diff --git a/.prettierignore b/.prettierignore index 9c247287..0a0d4376 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,2 +1,5 @@ .cache node_modules +python-client/.venv +python-client/.pytest_cache +python-client/tests/test_server.mjs diff --git a/python-client/pyproject.toml b/python-client/pyproject.toml index 9d681b81..7e9f2fa4 100644 --- a/python-client/pyproject.toml +++ b/python-client/pyproject.toml @@ -18,11 +18,18 @@ dependencies = [ dev = [ "pytest>=8.0", "pytest-asyncio>=0.23", + "ruff>=0.4", ] [tool.pytest.ini_options] asyncio_mode = "auto" testpaths = ["tests"] +[tool.ruff] +target-version = "py310" + +[tool.ruff.lint] +select = ["E", "F", "I", "W"] + [tool.setuptools.packages.find] include = ["river*"] diff --git a/python-client/river/__init__.py b/python-client/river/__init__.py index 01b30d92..1a0d3ddf 100644 --- a/python-client/river/__init__.py +++ b/python-client/river/__init__.py @@ -1,10 +1,10 @@ """River protocol v2.0 Python client implementation.""" -from river.types import TransportMessage, Ok, Err -from river.codec import NaiveJsonCodec, BinaryCodec -from river.transport import WebSocketClientTransport from river.client import RiverClient +from river.codec import BinaryCodec, NaiveJsonCodec from river.streams import Readable, Writable +from river.transport import WebSocketClientTransport +from river.types import Err, Ok, TransportMessage __all__ = [ "RiverClient", diff --git a/python-client/river/client.py b/python-client/river/client.py index eb6210d3..cf8956ef 100644 --- a/python-client/river/client.py +++ 
b/python-client/river/client.py @@ -14,6 +14,8 @@ from river.streams import Readable, Writable from river.transport import WebSocketClientTransport from river.types import ( + CANCEL_CODE, + UNEXPECTED_DISCONNECT_CODE, ControlFlags, PartialTransportMessage, TransportMessage, @@ -21,11 +23,8 @@ close_stream_message, err_result, generate_id, - is_ack, is_stream_cancel, is_stream_close, - CANCEL_CODE, - UNEXPECTED_DISCONNECT_CODE, ) logger = logging.getLogger(__name__) @@ -130,9 +129,7 @@ async def rpc( readable = result["res_readable"] done, value = await readable.next() if done: - return err_result( - UNEXPECTED_DISCONNECT_CODE, "No response received" - ) + return err_result(UNEXPECTED_DISCONNECT_CODE, "No response received") return value def stream( @@ -181,9 +178,7 @@ async def finalize() -> dict[str, Any]: readable = result["res_readable"] done, value = await readable.next() if done: - return err_result( - UNEXPECTED_DISCONNECT_CODE, "No response received" - ) + return err_result(UNEXPECTED_DISCONNECT_CODE, "No response received") return value return UploadResult( @@ -230,9 +225,7 @@ def _handle_proc( if transport.get_status() != "open": res_readable = Readable() res_readable._push_value( - err_result( - UNEXPECTED_DISCONNECT_CODE, "transport is closed" - ) + err_result(UNEXPECTED_DISCONNECT_CODE, "transport is closed") ) res_readable._trigger_close() req_writable = Writable(write_cb=lambda _: None, close_cb=None) @@ -335,12 +328,12 @@ def on_message(msg: TransportMessage) -> None: if isinstance(payload, dict) and "ok" in payload: res_readable._push_value(payload) else: - res_readable._push_value( - err_result( - payload.get("code", "UNKNOWN") if isinstance(payload, dict) else "UNKNOWN", - str(payload), - ) + code = ( + payload.get("code", "UNKNOWN") + if isinstance(payload, dict) + else "UNKNOWN" ) + res_readable._push_value(err_result(code, str(payload))) close_readable() if req_writable.is_writable(): req_writable._closed = True @@ -387,9 +380,7 @@ def 
on_client_cancel() -> None: nonlocal clean_close clean_close = False try: - res_readable._push_value( - err_result(CANCEL_CODE, "cancelled by client") - ) + res_readable._push_value(err_result(CANCEL_CODE, "cancelled by client")) except RuntimeError: pass close_readable() @@ -439,7 +430,7 @@ async def _watch_abort(): procedure_name=procedure_name, ) ) - except RuntimeError as e: + except RuntimeError: # Session dead at send time try: res_readable._push_value( diff --git a/python-client/river/codec.py b/python-client/river/codec.py index 3d5e8f11..8bbabd84 100644 --- a/python-client/river/codec.py +++ b/python-client/river/codec.py @@ -2,8 +2,8 @@ from __future__ import annotations -import json import base64 +import json from abc import ABC, abstractmethod from typing import Any diff --git a/python-client/river/session.py b/python-client/river/session.py index f97daffb..996cd3ad 100644 --- a/python-client/river/session.py +++ b/python-client/river/session.py @@ -8,20 +8,17 @@ import asyncio import logging import time -from dataclasses import dataclass, field +from dataclasses import dataclass from enum import Enum from typing import Any, Callable from river.codec import CodecMessageAdapter from river.types import ( - ControlFlags, PartialTransportMessage, TransportMessage, generate_id, handshake_request_payload, heartbeat_message, - is_ack, - PROTOCOL_VERSION, ) logger = logging.getLogger(__name__) @@ -109,9 +106,7 @@ def next_seq(self) -> int: return self.send_buffer[0].seq return self.seq - def construct_msg( - self, partial: PartialTransportMessage - ) -> TransportMessage: + def construct_msg(self, partial: PartialTransportMessage) -> TransportMessage: """Construct a full TransportMessage from a partial one. Fills in id, from, to, seq, ack and increments seq. 
@@ -326,9 +321,7 @@ def destroy(self) -> None: self._ws = None self.send_buffer.clear() - def create_handshake_request( - self, metadata: Any = None - ) -> TransportMessage: + def create_handshake_request(self, metadata: Any = None) -> TransportMessage: """Create a handshake request transport message. Handshake messages have seq=0, ack=0, controlFlags=0. diff --git a/python-client/river/streams.py b/python-client/river/streams.py index cbca9601..ae01c309 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -3,7 +3,7 @@ from __future__ import annotations import asyncio -from typing import Any, Callable, Generic, TypeVar +from typing import Callable, Generic, TypeVar T = TypeVar("T") @@ -105,7 +105,13 @@ async def _iterate(self): self._locked = True while True: if self._broken: - yield {"ok": False, "payload": {"code": "READABLE_BROKEN", "message": "stream was broken"}} + yield { + "ok": False, + "payload": { + "code": "READABLE_BROKEN", + "message": "stream was broken", + }, + } return if self._queue: diff --git a/python-client/river/transport.py b/python-client/river/transport.py index 5ca7ca72..92655be7 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -8,26 +8,17 @@ import asyncio import logging -import math import random -import time from typing import Any, Callable from river.codec import Codec, CodecMessageAdapter, NaiveJsonCodec -from river.session import Session, SessionOptions, SessionState, DEFAULT_SESSION_OPTIONS +from river.session import DEFAULT_SESSION_OPTIONS, Session, SessionOptions, SessionState from river.types import ( - ControlFlags, + RETRIABLE_HANDSHAKE_CODES, PartialTransportMessage, TransportMessage, generate_id, is_ack, - is_stream_cancel, - is_stream_close, - is_stream_open, - RETRIABLE_HANDSHAKE_CODES, - FATAL_HANDSHAKE_CODES, - UNEXPECTED_DISCONNECT_CODE, - err_result, ) logger = logging.getLogger(__name__) @@ -88,9 +79,7 @@ def get_backoff_ms(self) -> float: return 0 
exponent = max(0, self.budget_consumed - 1) jitter = random.random() * self.max_jitter_ms - backoff = min( - self.base_interval_ms * (2**exponent), self.max_backoff_ms - ) + backoff = min(self.base_interval_ms * (2**exponent), self.max_backoff_ms) return backoff + jitter def consume_budget(self) -> None: @@ -104,9 +93,7 @@ def start_restoring_budget(self) -> None: async def _restore_loop(): try: while self.budget_consumed > 0: - await asyncio.sleep( - self.budget_restore_interval_ms / 1000.0 - ) + await asyncio.sleep(self.budget_restore_interval_ms / 1000.0) self.budget_consumed = max(0, self.budget_consumed - 1) except asyncio.CancelledError: pass @@ -212,9 +199,7 @@ def _delete_session(self, to: str, emit_closing: bool = True) -> None: "sessionStatus", {"status": "closing", "session": session} ) session.destroy() - self._events.dispatch( - "sessionStatus", {"status": "closed", "session": session} - ) + self._events.dispatch("sessionStatus", {"status": "closed", "session": session}) def _on_session_grace_elapsed(self, to: str) -> None: """Called when a session's grace period expires.""" @@ -285,19 +270,17 @@ async def _create_connection(self, to: str) -> Any: url = self._ws_url if isinstance(self._ws_url, str) else self._ws_url(to) ws = await asyncio.wait_for( - websockets.connect(url, max_size=None, ping_interval=None, ping_timeout=None), + websockets.connect( + url, max_size=None, ping_interval=None, ping_timeout=None + ), timeout=self.options.connection_timeout_ms / 1000.0, ) return ws - async def _do_handshake( - self, session: Session, ws: Any, to: str - ) -> None: + async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: """Perform the handshake on a newly connected WebSocket.""" # Send handshake request - hs_msg = session.create_handshake_request( - metadata=self._handshake_metadata - ) + hs_msg = session.create_handshake_request(metadata=self._handshake_metadata) ok, buf = self._codec_adapter.to_buffer(hs_msg) if not ok: 
logger.error("Failed to encode handshake: %s", buf) @@ -332,10 +315,7 @@ async def _do_handshake( payload = response_msg.payload # Validate handshake response - if ( - not isinstance(payload, dict) - or payload.get("type") != "HANDSHAKE_RESP" - ): + if not isinstance(payload, dict) or payload.get("type") != "HANDSHAKE_RESP": logger.error("Invalid handshake response payload") await ws.close() self._on_connection_failed(to) @@ -345,9 +325,7 @@ async def _do_handshake( if not status.get("ok"): code = status.get("code", "UNKNOWN") reason = status.get("reason", "Unknown reason") - logger.debug( - "Handshake rejected for %s: %s (%s)", to, reason, code - ) + logger.debug("Handshake rejected for %s: %s (%s)", to, reason, code) await ws.close() if code in RETRIABLE_HANDSHAKE_CODES: @@ -405,9 +383,7 @@ async def _do_handshake( # Start listening for messages self._start_message_listener(session, ws, to) - def _start_message_listener( - self, session: Session, ws: Any, to: str - ) -> None: + def _start_message_listener(self, session: Session, ws: Any, to: str) -> None: """Start the async message listener on the WebSocket.""" loop = self._get_loop() @@ -423,18 +399,14 @@ async def _listen(): self._on_message_data(session, raw_msg, to) except Exception as e: if not session._destroyed: - logger.debug( - "WebSocket error for session %s: %s", session.id, e - ) + logger.debug("WebSocket error for session %s: %s", session.id, e) finally: if not session._destroyed: self._on_connection_dropped(to) loop.create_task(_listen()) - def _on_message_data( - self, session: Session, raw: bytes, to: str - ) -> None: + def _on_message_data(self, session: Session, raw: bytes, to: str) -> None: """Handle raw bytes received from the WebSocket.""" ok, result = self._codec_adapter.from_buffer(raw) if not ok: @@ -499,7 +471,6 @@ def _on_connection_failed(self, to: str) -> None: if session is None or session._destroyed: return - loop = self._get_loop() session.state = SessionState.NO_CONNECTION if 
self._reconnect_on_connection_drop: diff --git a/python-client/river/types.py b/python-client/river/types.py index 591ed830..2e81f64c 100644 --- a/python-client/river/types.py +++ b/python-client/river/types.py @@ -2,12 +2,11 @@ from __future__ import annotations -import string import random +import string from dataclasses import dataclass, field from enum import IntFlag -from typing import Any, TypeVar, Generic, Union - +from typing import Any, Generic, TypeVar, Union # --- ID Generation --- diff --git a/python-client/tests/conftest.py b/python-client/tests/conftest.py index 4299e9d2..0c0c7f36 100644 --- a/python-client/tests/conftest.py +++ b/python-client/tests/conftest.py @@ -16,7 +16,6 @@ import pytest - TESTS_DIR = os.path.dirname(__file__) SERVER_TS = os.path.join(TESTS_DIR, "test_server.ts") SERVER_MJS = os.path.join(TESTS_DIR, "test_server.mjs") @@ -48,9 +47,7 @@ def _build_test_server() -> None: text=True, ) if result.returncode != 0: - raise RuntimeError( - f"esbuild failed ({result.returncode}):\n{result.stderr}" - ) + raise RuntimeError(f"esbuild failed ({result.returncode}):\n{result.stderr}") @pytest.fixture(scope="session") @@ -85,12 +82,9 @@ def river_server_port() -> Generator[int, None, None]: line = proc.stdout.readline().decode("utf-8").strip() if not line: if proc.poll() is not None: - stderr = ( - proc.stderr.read().decode("utf-8") if proc.stderr else "" - ) + stderr = proc.stderr.read().decode("utf-8") if proc.stderr else "" raise RuntimeError( - f"Test server exited with code {proc.returncode}.\n" - f"stderr: {stderr}" + f"Test server exited with code {proc.returncode}.\nstderr: {stderr}" ) time.sleep(0.1) continue diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index de5ae8b5..38ef834a 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -7,12 +7,12 @@ from __future__ import annotations import asyncio + import pytest from river.client import RiverClient -from river.transport 
import WebSocketClientTransport from river.codec import NaiveJsonCodec - +from river.transport import WebSocketClientTransport # -- helpers -- @@ -78,9 +78,7 @@ async def test_concurrent_rpcs(self, server_url: str): """Multiple concurrent RPCs all complete correctly.""" client = await make_client(server_url) try: - tasks = [ - client.rpc("ordering", "add", {"n": i}) for i in range(10) - ] + tasks = [client.rpc("ordering", "add", {"n": i}) for i in range(10)] results = await asyncio.gather(*tasks) for i, result in enumerate(results): assert result["ok"] is True @@ -140,9 +138,7 @@ async def test_stream_with_init_message(self, server_url: str): """Stream handler receives the init message.""" client = await make_client(server_url) try: - stream = client.stream( - "test", "echoWithPrefix", {"prefix": "test"} - ) + stream = client.stream("test", "echoWithPrefix", {"prefix": "test"}) stream.req_writable.write({"msg": "hello", "ignore": False}) stream.req_writable.write({"msg": "world", "ignore": False}) stream.req_writable.close() @@ -481,9 +477,7 @@ async def test_stream_idempotent_close(self, server_url: str): client = await make_client(server_url) try: abort_evt = asyncio.Event() - stream = client.stream( - "test", "echo", {}, abort_signal=abort_evt - ) + stream = client.stream("test", "echo", {}, abort_signal=abort_evt) stream.req_writable.write({"msg": "abc", "ignore": False}) stream.req_writable.close() @@ -517,9 +511,7 @@ async def test_subscription_idempotent_close(self, server_url: str): client = await make_client(server_url) try: abort_evt = asyncio.Event() - sub = client.subscribe( - "subscribable", "value", {}, abort_signal=abort_evt - ) + sub = client.subscribe("subscribable", "value", {}, abort_signal=abort_evt) # Read initial value done, msg = await sub.res_readable.next() assert not done @@ -546,9 +538,7 @@ async def test_cancellation_after_transport_close(self, server_url: str): client = await make_client(server_url) try: abort_evt = asyncio.Event() - 
stream = client.stream( - "test", "echo", {}, abort_signal=abort_evt - ) + stream = client.stream("test", "echo", {}, abort_signal=abort_evt) stream.req_writable.write({"msg": "1", "ignore": False}) done, msg = await stream.res_readable.next() assert not done @@ -597,6 +587,89 @@ async def test_eagerly_connect(self, server_url: str): await transport.close() +# ===================================================================== +# Transparent Reconnect Tests +# ===================================================================== + + +class TestTransparentReconnect: + @pytest.mark.asyncio + async def test_reconnect_with_concurrent_streams(self, server_url: str): + """Multiple concurrent streams survive a connection drop and reconnect.""" + from river.session import SessionState + + transport = WebSocketClientTransport( + ws_url=server_url, + server_id="SERVER", + codec=NaiveJsonCodec(), + ) + client = RiverClient(transport, server_id="SERVER") + try: + # Open three concurrent streams with different prefixes + stream_a = client.stream("test", "echoWithPrefix", {"prefix": "A"}) + stream_b = client.stream("test", "echoWithPrefix", {"prefix": "B"}) + stream_c = client.stream("test", "echoWithPrefix", {"prefix": "C"}) + + # Send initial messages on each stream and verify they work + stream_a.req_writable.write({"msg": "1", "ignore": False}) + stream_b.req_writable.write({"msg": "1", "ignore": False}) + stream_c.req_writable.write({"msg": "1", "ignore": False}) + + done_a, msg_a = await stream_a.res_readable.next() + done_b, msg_b = await stream_b.res_readable.next() + done_c, msg_c = await stream_c.res_readable.next() + + assert not done_a and msg_a["payload"]["response"] == "A 1" + assert not done_b and msg_b["payload"]["response"] == "B 1" + assert not done_c and msg_c["payload"]["response"] == "C 1" + + # Force-close the WebSocket to simulate a network drop + session = transport.sessions.get("SERVER") + assert session is not None + assert session._ws is not None + 
await session._ws.close() + + # Wait for reconnection + reconnected = asyncio.Event() + + def on_transition(evt): + if evt.get("state") == SessionState.CONNECTED: + reconnected.set() + + transport.add_event_listener("sessionTransition", on_transition) + await asyncio.wait_for(reconnected.wait(), timeout=5.0) + transport.remove_event_listener("sessionTransition", on_transition) + + # Send more messages on all three streams after reconnect + stream_a.req_writable.write({"msg": "2", "ignore": False}) + stream_b.req_writable.write({"msg": "2", "ignore": False}) + stream_c.req_writable.write({"msg": "2", "ignore": False}) + + # Close all streams + stream_a.req_writable.close() + stream_b.req_writable.close() + stream_c.req_writable.close() + + # Verify the post-reconnect messages arrived correctly + done_a2, msg_a2 = await stream_a.res_readable.next() + done_b2, msg_b2 = await stream_b.res_readable.next() + done_c2, msg_c2 = await stream_c.res_readable.next() + + assert not done_a2 and msg_a2["payload"]["response"] == "A 2" + assert not done_b2 and msg_b2["payload"]["response"] == "B 2" + assert not done_c2 and msg_c2["payload"]["response"] == "C 2" + + # Streams should close cleanly + done_a3, _ = await stream_a.res_readable.next() + done_b3, _ = await stream_b.res_readable.next() + done_c3, _ = await stream_c.res_readable.next() + assert done_a3 + assert done_b3 + assert done_c3 + finally: + await transport.close() + + # ===================================================================== # Codec Tests # ===================================================================== @@ -810,9 +883,9 @@ def test_control_flags(self): from river.types import ( ControlFlags, is_ack, - is_stream_open, is_stream_cancel, is_stream_close, + is_stream_open, ) assert is_ack(ControlFlags.AckBit) @@ -975,9 +1048,10 @@ async def test_not_resolve_until_push(self): r: Readable = Readable() next_p = asyncio.ensure_future(r.next()) # Should not resolve yet - result = await asyncio.wait_for( - 
asyncio.shield(next_p), timeout=0.01 - ) if False else None + try: + await asyncio.wait_for(asyncio.shield(next_p), timeout=0.01) + except asyncio.TimeoutError: + pass done = next_p.done() assert not done, "next() should not resolve before push" diff --git a/python-client/tests/test_server.ts b/python-client/tests/test_server.ts index 4f5ab982..eb35e0b9 100644 --- a/python-client/tests/test_server.ts +++ b/python-client/tests/test_server.ts @@ -33,6 +33,7 @@ const TestServiceSchema = ServiceSchema.define({ responseError: Type.Never(), async handler({ reqInit }) { count += reqInit.n; + return Ok({ result: count }); }, }), @@ -77,7 +78,7 @@ const TestServiceSchema = ServiceSchema.define({ // ------------------------------------------------------------------- // OrderingService – for message ordering tests // ------------------------------------------------------------------- -const msgs: number[] = []; +const msgs: Array = []; const OrderingServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ @@ -86,6 +87,7 @@ const OrderingServiceSchema = ServiceSchema.define({ responseError: Type.Never(), async handler({ reqInit }) { msgs.push(reqInit.n); + return Ok({ n: reqInit.n }); }, }), @@ -93,8 +95,10 @@ const OrderingServiceSchema = ServiceSchema.define({ requestInit: Type.Object({}), responseData: Type.Object({ msgs: Type.Array(Type.Number()) }), responseError: Type.Never(), - async handler() { - return Ok({ msgs: [...msgs] }); + async handler(_ctx) { + const copy: Array = [...msgs]; + + return Ok({ msgs: copy }); }, }), }); @@ -130,6 +134,7 @@ const FallibleServiceSchema = ServiceSchema.define({ message: 'Result is infinity', }); } + return Ok({ result }); }, }), @@ -169,6 +174,7 @@ const FallibleServiceSchema = ServiceSchema.define({ // SubscribableService – subscriptions // ------------------------------------------------------------------- let subCount = 0; + type SubListener = (val: number) => void; const subListeners = new Set(); @@ -180,6 +186,7 @@ const 
SubscribableServiceSchema = ServiceSchema.define({ async handler({ reqInit }) { subCount += reqInit.n; for (const l of subListeners) l(subCount); + return Ok({ result: subCount }); }, }), @@ -217,6 +224,7 @@ const UploadableServiceSchema = ServiceSchema.define({ if (!result.ok) break; total += result.payload.n; } + return Ok({ result: total }); }, }), @@ -231,6 +239,7 @@ const UploadableServiceSchema = ServiceSchema.define({ if (!result.ok) break; total += result.payload.n; } + return Ok({ result: `${reqInit.prefix} ${total}` }); }, }), @@ -249,12 +258,14 @@ const UploadableServiceSchema = ServiceSchema.define({ total += result.payload.n; if (total >= 10) { ctx.cancel(); + return Err({ code: 'CANCEL' as const, message: 'total exceeds limit', }); } } + return Ok({ result: total }); }, }), @@ -270,7 +281,7 @@ const CancellationServiceSchema = ServiceSchema.define({ responseError: Type.Never(), async handler({ ctx }) { // Block until cancelled - return new Promise((resolve) => { + return new Promise((_resolve) => { ctx.signal.addEventListener('abort', () => { // Handler will be cancelled by the framework, nothing to resolve }); @@ -282,7 +293,7 @@ const CancellationServiceSchema = ServiceSchema.define({ requestData: Type.Object({}), responseData: Type.Object({}), responseError: Type.Never(), - async handler({ ctx }) { + async handler(_ctx) { return new Promise(() => { // never resolves }); @@ -293,8 +304,8 @@ const CancellationServiceSchema = ServiceSchema.define({ requestData: Type.Object({}), responseData: Type.Object({}), responseError: Type.Never(), - async handler({ ctx }) { - return new Promise(() => { + async handler(_ctx) { + return new Promise(() => { // never resolves }); }, @@ -303,7 +314,7 @@ const CancellationServiceSchema = ServiceSchema.define({ requestInit: Type.Object({}), responseData: Type.Object({}), responseError: Type.Never(), - async handler({ ctx }) { + async handler(_ctx) { return new Promise(() => { // never resolves }); @@ -342,6 +353,7 @@ 
const CancellationServiceSchema = ServiceSchema.define({ for await (const result of reqReadable) { if (!result.ok) break; } + return Ok({ done: true }); }, }), @@ -405,20 +417,20 @@ async function main() { // Keep the server alive process.on('SIGTERM', () => { - _server.close().then(() => { + void _server.close().then(() => { httpServer.close(); process.exit(0); }); }); process.on('SIGINT', () => { - _server.close().then(() => { + void _server.close().then(() => { httpServer.close(); process.exit(0); }); }); } -main().catch((err) => { +main().catch((err: unknown) => { console.error('Failed to start test server:', err); process.exit(1); }); From 7a86745866548991040fe15b2e6d70eba3d6e6fa Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 15:58:17 -0800 Subject: [PATCH 05/29] more --- python-client/pyproject.toml | 5 + python-client/river/codegen/__init__.py | 10 + python-client/river/codegen/__main__.py | 56 + python-client/river/codegen/emitter.py | 174 ++ python-client/river/codegen/schema.py | 325 ++ .../river/codegen/templates/errors.py.j2 | 45 + .../river/codegen/templates/init.py.j2 | 11 + .../codegen/templates/service_client.py.j2 | 148 + .../river/codegen/templates/types.py.j2 | 25 + python-client/tests/conftest.py | 65 +- python-client/tests/extract_test_schema.mjs | 758 +++++ python-client/tests/extract_test_schema.ts | 361 +++ python-client/tests/generated/__init__.py | 17 + python-client/tests/generated/_errors.py | 45 + python-client/tests/generated/_types.py | 239 ++ .../tests/generated/cancel_client.py | 264 ++ python-client/tests/generated/conftest.py | 1 + .../tests/generated/fallible_client.py | 66 + .../tests/generated/ordering_client.py | 46 + .../tests/generated/subscribable_client.py | 60 + python-client/tests/generated/test_client.py | 99 + .../tests/generated/uploadable_client.py | 115 + python-client/tests/test_codegen.py | 383 +++ python-client/tests/test_schema.json | 2654 +++++++++++++++++ 24 files changed, 5964 insertions(+), 
8 deletions(-) create mode 100644 python-client/river/codegen/__init__.py create mode 100644 python-client/river/codegen/__main__.py create mode 100644 python-client/river/codegen/emitter.py create mode 100644 python-client/river/codegen/schema.py create mode 100644 python-client/river/codegen/templates/errors.py.j2 create mode 100644 python-client/river/codegen/templates/init.py.j2 create mode 100644 python-client/river/codegen/templates/service_client.py.j2 create mode 100644 python-client/river/codegen/templates/types.py.j2 create mode 100644 python-client/tests/extract_test_schema.mjs create mode 100644 python-client/tests/extract_test_schema.ts create mode 100644 python-client/tests/generated/__init__.py create mode 100644 python-client/tests/generated/_errors.py create mode 100644 python-client/tests/generated/_types.py create mode 100644 python-client/tests/generated/cancel_client.py create mode 100644 python-client/tests/generated/conftest.py create mode 100644 python-client/tests/generated/fallible_client.py create mode 100644 python-client/tests/generated/ordering_client.py create mode 100644 python-client/tests/generated/subscribable_client.py create mode 100644 python-client/tests/generated/test_client.py create mode 100644 python-client/tests/generated/uploadable_client.py create mode 100644 python-client/tests/test_codegen.py create mode 100644 python-client/tests/test_schema.json diff --git a/python-client/pyproject.toml b/python-client/pyproject.toml index 7e9f2fa4..aaa1319b 100644 --- a/python-client/pyproject.toml +++ b/python-client/pyproject.toml @@ -12,6 +12,8 @@ license = {text = "MIT"} dependencies = [ "websockets>=12.0", "msgpack>=1.0", + "typing_extensions>=4.0", + "jinja2>=3.0", ] [project.optional-dependencies] @@ -33,3 +35,6 @@ select = ["E", "F", "I", "W"] [tool.setuptools.packages.find] include = ["river*"] + +[tool.setuptools.package-data] +"river.codegen" = ["templates/*.j2"] diff --git a/python-client/river/codegen/__init__.py 
b/python-client/river/codegen/__init__.py new file mode 100644 index 00000000..d7c4e680 --- /dev/null +++ b/python-client/river/codegen/__init__.py @@ -0,0 +1,10 @@ +"""River protocol codegen — generates typed Python clients from JSON Schema.""" + +from river.codegen.emitter import write_generated_files +from river.codegen.schema import SchemaConverter, SchemaIR + +__all__ = [ + "SchemaConverter", + "SchemaIR", + "write_generated_files", +] diff --git a/python-client/river/codegen/__main__.py b/python-client/river/codegen/__main__.py new file mode 100644 index 00000000..09e88a2e --- /dev/null +++ b/python-client/river/codegen/__main__.py @@ -0,0 +1,56 @@ +"""CLI entry point: python -m river.codegen + +Usage: + python -m river.codegen --schema schema.json --output generated/ +""" + +from __future__ import annotations + +import argparse +import json + +from river.codegen.emitter import write_generated_files +from river.codegen.schema import SchemaConverter + + +def main(argv: list[str] | None = None) -> None: + parser = argparse.ArgumentParser( + prog="river.codegen", + description="Generate typed Python clients from a River JSON schema.", + ) + parser.add_argument( + "--schema", + "-s", + required=True, + help="Path to the serialized schema JSON file.", + ) + parser.add_argument( + "--output", + "-o", + required=True, + help="Output directory for generated files.", + ) + parser.add_argument( + "--package", + default=None, + help="Absolute import prefix instead of relative imports.", + ) + + args = parser.parse_args(argv) + + with open(args.schema) as f: + raw_schema = json.load(f) + + converter = SchemaConverter() + ir = converter.convert(raw_schema) + + written = write_generated_files(ir, args.output, package=args.package) + + for path in written: + print(f" wrote {path}") + + print(f"Generated {len(written)} files in {args.output}") + + +if __name__ == "__main__": + main() diff --git a/python-client/river/codegen/emitter.py b/python-client/river/codegen/emitter.py 
new file mode 100644 index 00000000..8213c338 --- /dev/null +++ b/python-client/river/codegen/emitter.py @@ -0,0 +1,174 @@ +"""IR → Python source file emitter. + +Renders Jinja2 templates from the ``templates/`` directory against +a :class:`SchemaIR` to produce the generated output package. +""" + +from __future__ import annotations + +import os +from pathlib import Path + +import jinja2 + +from river.codegen.schema import SchemaIR, ServiceDef, _to_pascal_case + +_TEMPLATE_DIR = Path(__file__).parent / "templates" + +_env = jinja2.Environment( + loader=jinja2.FileSystemLoader(str(_TEMPLATE_DIR)), + keep_trailing_newline=True, + lstrip_blocks=True, + trim_blocks=True, +) +_env.filters["pascal"] = _to_pascal_case + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _field_annotation(f) -> str: # noqa: ANN001 + """Return the full annotation for a TypedDict field.""" + ann = f.type_ref.annotation + if not f.required: + return f"NotRequired[{ann}]" + return ann + + +def _collect_used_type_names(svc: ServiceDef, ir: SchemaIR) -> list[str]: + """Collect TypedDict names actually referenced in method signatures.""" + td_names = {td.name for td in ir.typedicts} + names: set[str] = set() + + for proc in svc.procedures: + _extract_names(proc.init_type.annotation, td_names, names) + if proc.input_type: + _extract_names(proc.input_type.annotation, td_names, names) + + return sorted(names) + + +def _extract_names(annotation: str, known: set[str], out: set[str]) -> None: + for part in annotation.replace("|", " ").split(): + clean = part.strip("[]").strip() + if clean in known: + out.add(clean) + + +# --------------------------------------------------------------------------- +# Rendering +# --------------------------------------------------------------------------- + + +def _prepare_typedicts(ir: SchemaIR) -> list[dict]: + """Prepare TypedDict data for 
the types template.""" + result = [] + for td in ir.typedicts: + fields = [] + for f in td.fields: + fields.append({"name": f.name, "annotation": _field_annotation(f)}) + result.append( + {"name": td.name, "description": td.description, "fields": fields} + ) + return result + + +def render_errors() -> str: + return _env.get_template("errors.py.j2").render() + + +def render_types(ir: SchemaIR) -> str: + typedicts = _prepare_typedicts(ir) + + needs_literal = any( + "Literal[" in f["annotation"] for td in typedicts for f in td["fields"] + ) + has_not_required = any( + "NotRequired[" in f["annotation"] for td in typedicts for f in td["fields"] + ) + + typing_ext = ["TypedDict"] + if has_not_required: + typing_ext.append("NotRequired") + + return _env.get_template("types.py.j2").render( + typedicts=typedicts, + needs_literal=needs_literal, + typing_ext_imports=sorted(typing_ext), + ) + + +def render_service_client(svc: ServiceDef, ir: SchemaIR, import_prefix: str) -> str: + type_names = _collect_used_type_names(svc, ir) + types_module = "._types" if import_prefix == "." 
else f"{import_prefix}_types" + + needs_readable = any( + p.proc_type in ("stream", "subscription") for p in svc.procedures + ) + needs_writable = any(p.proc_type in ("stream", "upload") for p in svc.procedures) + + wrappers = [ + p for p in svc.procedures if p.proc_type in ("stream", "upload", "subscription") + ] + + return _env.get_template("service_client.py.j2").render( + service=svc, + type_names=type_names, + types_module=types_module, + needs_readable=needs_readable, + needs_writable=needs_writable, + wrappers=wrappers, + ) + + +def render_init(ir: SchemaIR, import_prefix: str) -> str: + imports = [] + for svc in ir.services: + if import_prefix == ".": + mod = f".{svc.name}_client" + else: + mod = f"{import_prefix}{svc.name}_client" + imports.append((mod, f"{svc.class_name}Client")) + + imports.sort(key=lambda x: x[0]) + + return _env.get_template("init.py.j2").render(imports=imports) + + +# --------------------------------------------------------------------------- +# Top-level write function +# --------------------------------------------------------------------------- + + +def write_generated_files( + ir: SchemaIR, + output_dir: str, + package: str | None = None, +) -> list[str]: + """Write all generated files to *output_dir*. + + Returns the list of written file paths. + """ + os.makedirs(output_dir, exist_ok=True) + import_prefix = f"{package}." if package else "." 
+ written: list[str] = [] + + def _write(name: str, content: str) -> None: + p = Path(output_dir) / name + p.write_text(content) + written.append(str(p)) + + _write("_errors.py", render_errors()) + _write("_types.py", render_types(ir)) + + for svc in ir.services: + _write( + f"{svc.name}_client.py", + render_service_client(svc, ir, import_prefix), + ) + + _write("__init__.py", render_init(ir, import_prefix)) + + return written diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py new file mode 100644 index 00000000..9045ae33 --- /dev/null +++ b/python-client/river/codegen/schema.py @@ -0,0 +1,325 @@ +"""JSON Schema → Python IR conversion. + +Parses the serialized River schema (produced by serializeSchema() in TS) +into intermediate representation dataclasses that the emitter can turn +into Python source files. +""" + +from __future__ import annotations + +import keyword +import re +from dataclasses import dataclass, field + +# --------------------------------------------------------------------------- +# IR types +# --------------------------------------------------------------------------- + + +@dataclass +class TypeRef: + """A reference to a Python type, either inline or named.""" + + annotation: str # e.g. 
"str", "int", "list[float]", "TestAddInit" + + +@dataclass +class TypedDictField: + name: str + type_ref: TypeRef + required: bool = True + description: str | None = None + + +@dataclass +class TypedDictDef: + """A TypedDict class to be emitted.""" + + name: str + fields: list[TypedDictField] = field(default_factory=list) + description: str | None = None + + +@dataclass +class ProcedureDef: + """Describes a single procedure in a service.""" + + name: str # camelCase wire name + py_name: str # snake_case Python method name + proc_type: str # "rpc" | "stream" | "upload" | "subscription" + init_type: TypeRef # type annotation for init param + input_type: TypeRef | None # only for stream/upload + output_type: TypeRef # ok payload type + error_type: TypeRef | None # service-specific errors + description: str | None = None + + +@dataclass +class ServiceDef: + """Describes a single service.""" + + name: str # wire name + class_name: str # PascalCase Python class name + procedures: list[ProcedureDef] = field(default_factory=list) + + +@dataclass +class SchemaIR: + """Complete intermediate representation for the whole server schema.""" + + services: list[ServiceDef] = field(default_factory=list) + typedicts: list[TypedDictDef] = field(default_factory=list) + + +# --------------------------------------------------------------------------- +# Protocol error codes (always present in the errors union) +# --------------------------------------------------------------------------- + +PROTOCOL_ERROR_CODES = frozenset( + {"UNCAUGHT_ERROR", "UNEXPECTED_DISCONNECT", "INVALID_REQUEST", "CANCEL"} +) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _to_pascal_case(s: str) -> str: + """Convert a camelCase, snake_case, or space-separated string to PascalCase.""" + # Handle snake_case or space-separated + if "_" in s or " " in s: + # Split on underscores and 
spaces + words = re.split(r"[_ ]+", s) + return "".join(word.capitalize() for word in words if word) + # camelCase → PascalCase: just capitalize first letter + if s: + return s[0].upper() + s[1:] + return s + + +def _to_snake_case(s: str) -> str: + """Convert camelCase to snake_case.""" + result = re.sub(r"([A-Z])", r"_\1", s).lower() + result = result.lstrip("_") + if keyword.iskeyword(result): + result += "_" + return result + + +def _safe_field_name(name: str) -> str: + """Ensure a field name is a valid Python identifier.""" + if keyword.iskeyword(name): + return name + "_" + return name + + +# --------------------------------------------------------------------------- +# JSON Schema → TypeRef conversion +# --------------------------------------------------------------------------- + + +class SchemaConverter: + """Converts a serialized River server schema into SchemaIR.""" + + def __init__(self) -> None: + self._typedicts: list[TypedDictDef] = [] + + def convert(self, raw: dict) -> SchemaIR: + """Convert the top-level serialized schema dict to IR.""" + services: list[ServiceDef] = [] + for svc_name, svc_data in raw.get("services", {}).items(): + svc_def = self._convert_service(svc_name, svc_data) + services.append(svc_def) + + return SchemaIR(services=services, typedicts=list(self._typedicts)) + + def _convert_service(self, name: str, data: dict) -> ServiceDef: + class_name = _to_pascal_case(name) + procedures: list[ProcedureDef] = [] + for proc_name, proc_data in data.get("procedures", {}).items(): + proc_def = self._convert_procedure(class_name, proc_name, proc_data) + procedures.append(proc_def) + return ServiceDef( + name=name, + class_name=class_name, + procedures=procedures, + ) + + def _convert_procedure(self, svc_class: str, name: str, data: dict) -> ProcedureDef: + proc_type = data["type"] + prefix = svc_class + _to_pascal_case(name) + + # Init type + init_type = self._schema_to_typeref(data["init"], f"{prefix}Init") + + # Input type (only for 
stream/upload) + input_type = None + if "input" in data: + input_type = self._schema_to_typeref(data["input"], f"{prefix}Input") + + # Output type + output_type = self._schema_to_typeref(data["output"], f"{prefix}Output") + + # Error type — separate protocol errors from service errors + error_type = self._extract_service_errors(data.get("errors"), prefix) + + description = data.get("description") + + return ProcedureDef( + name=name, + py_name=_to_snake_case(name), + proc_type=proc_type, + init_type=init_type, + input_type=input_type, + output_type=output_type, + error_type=error_type, + description=description, + ) + + def _extract_service_errors( + self, errors_schema: dict | None, prefix: str + ) -> TypeRef | None: + """Extract non-protocol errors from the errors union.""" + if errors_schema is None: + return None + + variants = errors_schema.get("anyOf", []) + service_variants = [] + for v in variants: + code_schema = v.get("properties", {}).get("code", {}) + code_const = code_schema.get("const") + if code_const and code_const in PROTOCOL_ERROR_CODES: + continue + service_variants.append(v) + + if not service_variants: + return None + + if len(service_variants) == 1: + return self._schema_to_typeref(service_variants[0], f"{prefix}Error") + + # Multiple service error variants → union + refs: list[TypeRef] = [] + for i, v in enumerate(service_variants): + code_schema = v.get("properties", {}).get("code", {}) + code_const = code_schema.get("const") + if code_const: + suffix = _to_pascal_case(code_const.lower().replace("_", " ")) + td_name = f"{prefix}Error{suffix}" + else: + td_name = f"{prefix}Error{i}" + refs.append(self._schema_to_typeref(v, td_name)) + + parts = " | ".join(r.annotation for r in refs) + return TypeRef(annotation=parts) + + def _schema_to_typeref(self, schema: dict, name_hint: str) -> TypeRef: + """Convert a JSON Schema node to a TypeRef, potentially creating TypedDicts.""" + if not isinstance(schema, dict): + return TypeRef(annotation="Any") + 
+ # const + if "const" in schema: + val = schema["const"] + if isinstance(val, str): + return TypeRef(annotation=f'Literal["{val}"]') + return TypeRef(annotation=f"Literal[{val!r}]") + + # anyOf (union) + if "anyOf" in schema: + return self._convert_union(schema, name_hint) + + schema_type = schema.get("type") + + # Primitive types + if schema_type == "string": + return TypeRef(annotation="str") + if schema_type == "number": + return TypeRef(annotation="float") + if schema_type == "integer": + return TypeRef(annotation="int") + if schema_type == "boolean": + return TypeRef(annotation="bool") + if schema_type == "null": + return TypeRef(annotation="None") + if schema_type == "Uint8Array": + return TypeRef(annotation="bytes") + + # Array + if schema_type == "array": + items = schema.get("items", {}) + item_ref = self._schema_to_typeref(items, f"{name_hint}Item") + return TypeRef(annotation=f"list[{item_ref.annotation}]") + + # Object → TypedDict + if schema_type == "object": + return self._convert_object(schema, name_hint) + + # Fallback + return TypeRef(annotation="Any") + + def _convert_object(self, schema: dict, name: str) -> TypeRef: + """Convert a JSON Schema object to a TypedDict and return a ref to it.""" + properties = schema.get("properties", {}) + required_set = set(schema.get("required", [])) + description = schema.get("description") + + fields: list[TypedDictField] = [] + for prop_name, prop_schema in properties.items(): + field_name = _safe_field_name(prop_name) + nested_name = name + _to_pascal_case(prop_name) + field_ref = self._schema_to_typeref(prop_schema, nested_name) + field_desc = ( + prop_schema.get("description") + if isinstance(prop_schema, dict) + else None + ) + fields.append( + TypedDictField( + name=field_name, + type_ref=field_ref, + required=prop_name in required_set, + description=field_desc, + ) + ) + + td = TypedDictDef(name=name, fields=fields, description=description) + self._typedicts.append(td) + return TypeRef(annotation=name) + 
+ def _convert_union(self, schema: dict, name_hint: str) -> TypeRef: + """Convert a JSON Schema anyOf to a Union type.""" + variants = schema.get("anyOf", []) + if len(variants) == 1: + return self._schema_to_typeref(variants[0], name_hint) + + refs: list[TypeRef] = [] + for i, v in enumerate(variants): + # Try to derive a meaningful name from a const code or description + variant_name = self._derive_variant_name(v, name_hint, i) + refs.append(self._schema_to_typeref(v, variant_name)) + + parts = " | ".join(r.annotation for r in refs) + return TypeRef(annotation=parts) + + def _derive_variant_name(self, variant: dict, base_name: str, index: int) -> str: + """Derive a name for a union variant.""" + # Check for a const code field + props = variant.get("properties", {}) + code_schema = props.get("code", {}) + if isinstance(code_schema, dict) and "const" in code_schema: + code_val = code_schema["const"] + suffix = _to_pascal_case( + code_val.lower().replace("_", " ").replace("-", " ") + ) + return f"{base_name}{suffix}" + + # Check for description + desc = variant.get("description") + if desc: + safe = re.sub(r"[^a-zA-Z0-9]", "", desc) + if safe: + return f"{base_name}{_to_pascal_case(safe)}" + + return f"{base_name}Variant{index}" diff --git a/python-client/river/codegen/templates/errors.py.j2 b/python-client/river/codegen/templates/errors.py.j2 new file mode 100644 index 00000000..ba3e8c37 --- /dev/null +++ b/python-client/river/codegen/templates/errors.py.j2 @@ -0,0 +1,45 @@ +"""Protocol-level error types for the River protocol. + +These errors can be returned by any procedure regardless of its +service-specific error schema. 
+""" + +from __future__ import annotations + +from typing import Literal + +from typing_extensions import NotRequired, TypedDict + + +class UncaughtError(TypedDict): + code: Literal["UNCAUGHT_ERROR"] + message: str + + +class UnexpectedDisconnect(TypedDict): + code: Literal["UNEXPECTED_DISCONNECT"] + message: str + + +class InvalidRequestExtrasItem(TypedDict): + path: str + message: str + + +class InvalidRequestExtras(TypedDict): + firstValidationErrors: list[InvalidRequestExtrasItem] + totalErrors: float + + +class InvalidRequest(TypedDict): + code: Literal["INVALID_REQUEST"] + message: str + extras: NotRequired[InvalidRequestExtras] + + +class Cancel(TypedDict): + code: Literal["CANCEL"] + message: str + + +ProtocolError = UncaughtError | UnexpectedDisconnect | InvalidRequest | Cancel diff --git a/python-client/river/codegen/templates/init.py.j2 b/python-client/river/codegen/templates/init.py.j2 new file mode 100644 index 00000000..2909e173 --- /dev/null +++ b/python-client/river/codegen/templates/init.py.j2 @@ -0,0 +1,11 @@ +"""Generated River service clients.""" + +{% for mod, cls in imports %} +from {{ mod }} import {{ cls }} +{% endfor %} + +__all__ = [ +{% for _, cls in imports %} + "{{ cls }}", +{% endfor %} +] diff --git a/python-client/river/codegen/templates/service_client.py.j2 b/python-client/river/codegen/templates/service_client.py.j2 new file mode 100644 index 00000000..0558c433 --- /dev/null +++ b/python-client/river/codegen/templates/service_client.py.j2 @@ -0,0 +1,148 @@ +"""Generated client for the {{ service.name }} service.""" + +from __future__ import annotations + +import asyncio +from typing import Any + +from river.client import RiverClient +{% if needs_readable and needs_writable %} +from river.streams import Readable, Writable +{% elif needs_readable %} +from river.streams import Readable +{% elif needs_writable %} +from river.streams import Writable +{% endif %} +{% if type_names %} + +from {{ types_module }} import ( +{% for name in 
type_names %} + {{ name }}, +{% endfor %} +) +{% endif %} +{% for proc in wrappers %} + + +{% if proc.proc_type == "stream" %} +class {{ service.class_name }}{{ proc.name | pascal }}StreamResult: + """Streaming result for ``{{ service.name }}.{{ proc.name }}``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[{{ proc.input_type.annotation }}]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable +{% elif proc.proc_type == "upload" %} +class {{ service.class_name }}{{ proc.name | pascal }}UploadResult: + """Upload result for ``{{ service.name }}.{{ proc.name }}``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[{{ proc.input_type.annotation }}]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + async def finalize(self) -> dict[str, Any]: + """Finalize the upload and get the response.""" + return await self._inner.finalize() +{% elif proc.proc_type == "subscription" %} +class {{ service.class_name }}{{ proc.name | pascal }}SubscriptionResult: + """Subscription result for ``{{ service.name }}.{{ proc.name }}``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable +{% endif %} +{% endfor %} + + +class {{ service.class_name }}Client: + """Typed client for the ``{{ service.name }}`` service.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client +{% for proc in service.procedures %} + +{% if proc.proc_type == "rpc" %} + async def {{ proc.py_name }}( + self, + init: {{ proc.init_type.annotation }}, + *, + abort_signal: asyncio.Event | 
None = None, + ) -> dict[str, Any]: +{% if proc.description %} + """{{ proc.description }}""" +{% endif %} + return await self._client.rpc( + "{{ service.name }}", + "{{ proc.name }}", + init, + abort_signal=abort_signal, + ) +{% elif proc.proc_type == "stream" %} + def {{ proc.py_name }}( + self, + init: {{ proc.init_type.annotation }}, + *, + abort_signal: asyncio.Event | None = None, + ) -> {{ service.class_name }}{{ proc.name | pascal }}StreamResult: +{% if proc.description %} + """{{ proc.description }}""" +{% endif %} + result = self._client.stream( + "{{ service.name }}", + "{{ proc.name }}", + init, + abort_signal=abort_signal, + ) + return {{ service.class_name }}{{ proc.name | pascal }}StreamResult(result) +{% elif proc.proc_type == "upload" %} + def {{ proc.py_name }}( + self, + init: {{ proc.init_type.annotation }}, + *, + abort_signal: asyncio.Event | None = None, + ) -> {{ service.class_name }}{{ proc.name | pascal }}UploadResult: +{% if proc.description %} + """{{ proc.description }}""" +{% endif %} + result = self._client.upload( + "{{ service.name }}", + "{{ proc.name }}", + init, + abort_signal=abort_signal, + ) + return {{ service.class_name }}{{ proc.name | pascal }}UploadResult(result) +{% elif proc.proc_type == "subscription" %} + def {{ proc.py_name }}( + self, + init: {{ proc.init_type.annotation }}, + *, + abort_signal: asyncio.Event | None = None, + ) -> {{ service.class_name }}{{ proc.name | pascal }}SubscriptionResult: +{% if proc.description %} + """{{ proc.description }}""" +{% endif %} + result = self._client.subscribe( + "{{ service.name }}", + "{{ proc.name }}", + init, + abort_signal=abort_signal, + ) + return {{ service.class_name }}{{ proc.name | pascal }}SubscriptionResult(result) +{% endif %} +{% endfor %} diff --git a/python-client/river/codegen/templates/types.py.j2 b/python-client/river/codegen/templates/types.py.j2 new file mode 100644 index 00000000..0614d707 --- /dev/null +++ 
b/python-client/river/codegen/templates/types.py.j2 @@ -0,0 +1,25 @@ +"""Generated type definitions for River services.""" + +from __future__ import annotations +{% if needs_literal %} + +from typing import Literal +{% endif %} + +from typing_extensions import {{ typing_ext_imports | join(", ") }} +{% for td in typedicts %} + + +class {{ td.name }}(TypedDict): +{% if td.description %} + """{{ td.description }}""" + +{% endif %} +{% if not td.fields %} + pass +{% else %} +{% for f in td.fields %} + {{ f.name }}: {{ f.annotation }} +{% endfor %} +{% endif %} +{% endfor %} diff --git a/python-client/tests/conftest.py b/python-client/tests/conftest.py index 0c0c7f36..b8d99cdd 100644 --- a/python-client/tests/conftest.py +++ b/python-client/tests/conftest.py @@ -11,6 +11,7 @@ import re import signal import subprocess +import sys import time from typing import Generator @@ -19,24 +20,24 @@ TESTS_DIR = os.path.dirname(__file__) SERVER_TS = os.path.join(TESTS_DIR, "test_server.ts") SERVER_MJS = os.path.join(TESTS_DIR, "test_server.mjs") +EXTRACT_SCHEMA_TS = os.path.join(TESTS_DIR, "extract_test_schema.ts") +EXTRACT_SCHEMA_MJS = os.path.join(TESTS_DIR, "extract_test_schema.mjs") +SCHEMA_JSON = os.path.join(TESTS_DIR, "test_schema.json") +GENERATED_DIR = os.path.join(TESTS_DIR, "generated") RIVER_ROOT = os.path.abspath(os.path.join(TESTS_DIR, "..", "..")) ESBUILD = os.path.join(RIVER_ROOT, "node_modules", ".bin", "esbuild") -def _build_test_server() -> None: - """Bundle test_server.ts -> test_server.mjs using esbuild. - - esbuild handles the river repo's bundler-style module resolution at - build time, producing a single ESM file that plain ``node`` can run. 
- """ +def _esbuild_bundle(ts_path: str, mjs_path: str) -> None: + """Bundle a .ts file to .mjs using esbuild.""" result = subprocess.run( [ ESBUILD, - SERVER_TS, + ts_path, "--bundle", "--platform=node", "--format=esm", - f"--outfile={SERVER_MJS}", + f"--outfile={mjs_path}", # keep heavy deps external so the bundle stays small and # we reuse whatever is already in node_modules "--external:ws", @@ -50,6 +51,54 @@ def _build_test_server() -> None: raise RuntimeError(f"esbuild failed ({result.returncode}):\n{result.stderr}") +def _build_test_server() -> None: + """Bundle test_server.ts -> test_server.mjs using esbuild.""" + _esbuild_bundle(SERVER_TS, SERVER_MJS) + + +def _extract_test_schema() -> None: + """Bundle and run extract_test_schema.ts to produce test_schema.json, + then run codegen to produce the generated client module.""" + _esbuild_bundle(EXTRACT_SCHEMA_TS, EXTRACT_SCHEMA_MJS) + result = subprocess.run( + ["node", EXTRACT_SCHEMA_MJS], + cwd=RIVER_ROOT, + capture_output=True, + text=True, + ) + if result.returncode != 0: + raise RuntimeError( + f"extract_test_schema failed ({result.returncode}):\n{result.stderr}" + ) + + # Run codegen + result = subprocess.run( + [ + sys.executable, + "-m", + "river.codegen", + "--schema", + SCHEMA_JSON, + "--output", + GENERATED_DIR, + ], + cwd=os.path.join(RIVER_ROOT, "python-client"), + capture_output=True, + text=True, + ) + if result.returncode != 0: + raise RuntimeError( + f"codegen failed ({result.returncode}):\n{result.stderr}\n{result.stdout}" + ) + + +@pytest.fixture(scope="session") +def generated_client_dir() -> str: + """Extract test schema and run codegen. 
Returns the generated dir path.""" + _extract_test_schema() + return GENERATED_DIR + + @pytest.fixture(scope="session") def event_loop(): """Create an event loop for the entire test session.""" diff --git a/python-client/tests/extract_test_schema.mjs b/python-client/tests/extract_test_schema.mjs new file mode 100644 index 00000000..db334f3b --- /dev/null +++ b/python-client/tests/extract_test_schema.mjs @@ -0,0 +1,758 @@ +// python-client/tests/extract_test_schema.ts +import fs from "node:fs"; +import path from "node:path"; + +// router/services.ts +import { Type as Type2, Kind as Kind2 } from "@sinclair/typebox"; + +// router/errors.ts +import { + Kind, + Type +} from "@sinclair/typebox"; +var UNCAUGHT_ERROR_CODE = "UNCAUGHT_ERROR"; +var UNEXPECTED_DISCONNECT_CODE = "UNEXPECTED_DISCONNECT"; +var INVALID_REQUEST_CODE = "INVALID_REQUEST"; +var CANCEL_CODE = "CANCEL"; +var ErrResultSchema = (t) => Type.Object({ + ok: Type.Literal(false), + payload: t +}); +var ValidationErrorDetails = Type.Object({ + path: Type.String(), + message: Type.String() +}); +var ValidationErrors = Type.Array(ValidationErrorDetails); +var CancelErrorSchema = Type.Object({ + code: Type.Literal(CANCEL_CODE), + message: Type.String() +}); +var CancelResultSchema = ErrResultSchema(CancelErrorSchema); +var ReaderErrorSchema = Type.Union([ + Type.Object({ + code: Type.Literal(UNCAUGHT_ERROR_CODE), + message: Type.String() + }), + Type.Object({ + code: Type.Literal(UNEXPECTED_DISCONNECT_CODE), + message: Type.String() + }), + Type.Object({ + code: Type.Literal(INVALID_REQUEST_CODE), + message: Type.String(), + extras: Type.Optional( + Type.Object({ + firstValidationErrors: Type.Array(ValidationErrorDetails), + totalErrors: Type.Number() + }) + ) + }), + CancelErrorSchema +]); +var ReaderErrorResultSchema = ErrResultSchema(ReaderErrorSchema); +function isUnion(schema2) { + return schema2[Kind] === "Union"; +} +function flattenErrorType(errType) { + if (!isUnion(errType)) { + return errType; + } + 
const flattenedTypes = []; + function flatten(type) { + if (isUnion(type)) { + for (const t of type.anyOf) { + flatten(t); + } + } else { + flattenedTypes.push(type); + } + } + flatten(errType); + return Type.Union(flattenedTypes); +} + +// router/services.ts +function Strict(schema2) { + return JSON.parse(JSON.stringify(schema2)); +} +function serializeSchema(services2, handshakeSchema) { + const serializedServiceObject = Object.entries(services2).reduce((acc, [name, value]) => { + acc[name] = value.serialize(); + return acc; + }, {}); + const schema2 = { + services: serializedServiceObject + }; + if (handshakeSchema) { + schema2.handshakeSchema = Strict(handshakeSchema); + } + return schema2; +} +function createServiceSchema() { + return class ServiceSchema2 { + /** + * Factory function for creating a fresh state. + */ + initializeState; + /** + * The procedures for this service. + */ + procedures; + /** + * @param config - The configuration for this service. + * @param procedures - The procedures for this service. + */ + constructor(config, procedures) { + this.initializeState = config.initializeState; + this.procedures = procedures; + } + /** + * Creates a {@link ServiceScaffold}, which can be used to define procedures + * that can then be merged into a {@link ServiceSchema}, via the scaffold's + * `finalize` method. + * + * There are two patterns that work well with this method. 
The first is using + * it to separate the definition of procedures from the definition of the + * service's configuration: + * ```ts + * const MyServiceScaffold = ServiceSchema.scaffold({ + * initializeState: () => ({ count: 0 }), + * }); + * + * const incrementProcedures = MyServiceScaffold.procedures({ + * increment: Procedure.rpc({ + * requestInit: Type.Object({ amount: Type.Number() }), + * responseData: Type.Object({ current: Type.Number() }), + * async handler(ctx, init) { + * ctx.state.count += init.amount; + * return Ok({ current: ctx.state.count }); + * } + * }), + * }) + * + * const MyService = MyServiceScaffold.finalize({ + * ...incrementProcedures, + * // you can also directly define procedures here + * }); + * ``` + * This might be really handy if you have a very large service and you're + * wanting to split it over multiple files. You can define the scaffold + * in one file, and then import that scaffold in other files where you + * define procedures - and then finally import the scaffolds and your + * procedure objects in a final file where you finalize the scaffold into + * a service schema. + * + * The other way is to use it like in a builder pattern: + * ```ts + * const MyService = ServiceSchema + * .scaffold({ initializeState: () => ({ count: 0 }) }) + * .finalize({ + * increment: Procedure.rpc({ + * requestInit: Type.Object({ amount: Type.Number() }), + * responseData: Type.Object({ current: Type.Number() }), + * async handler(ctx, init) { + * ctx.state.count += init.amount; + * return Ok({ current: ctx.state.count }); + * } + * }), + * }) + * ``` + * Depending on your preferences, this may be a more appealing way to define + * a schema versus using the {@link ServiceSchema.define} method. 
+ */ + static scaffold(config) { + return new ServiceScaffold(config); + } + // actual implementation + static define(configOrProcedures, maybeProcedures) { + let config; + let procedures; + if ("initializeState" in configOrProcedures && typeof configOrProcedures.initializeState === "function") { + if (!maybeProcedures) { + throw new Error("Expected procedures to be defined"); + } + config = configOrProcedures; + procedures = maybeProcedures; + } else { + config = { initializeState: () => ({}) }; + procedures = configOrProcedures; + } + return new ServiceSchema2(config, procedures); + } + /** + * Serializes this schema's procedures into a plain object that is JSON compatible. + */ + serialize() { + return { + procedures: Object.fromEntries( + Object.entries(this.procedures).map(([procName, procDef]) => [ + procName, + { + init: Strict(procDef.requestInit), + output: Strict(procDef.responseData), + errors: getSerializedProcErrors(procDef), + // Only add `description` field if the type declares it. + ..."description" in procDef ? { description: procDef.description } : {}, + type: procDef.type, + // Only add the `input` field if the type declares it. + ..."requestData" in procDef ? { + input: Strict(procDef.requestData) + } : {} + } + ]) + ) + }; + } + // TODO remove once clients migrate to v2 + /** + * Same as {@link ServiceSchema.serialize}, but with a format that is compatible with + * protocol v1. This is useful to be able to continue to generate schemas for older + * clients as they are still supported. + */ + serializeV1Compat() { + return { + procedures: Object.fromEntries( + Object.entries(this.procedures).map( + ([procName, procDef]) => { + if (procDef.type === "rpc" || procDef.type === "subscription") { + return [ + procName, + { + // BACKWARDS COMPAT: map init to input for protocolv1 + // this is the only change needed to make it compatible. 
+ input: Strict(procDef.requestInit), + output: Strict(procDef.responseData), + errors: getSerializedProcErrors(procDef), + // Only add `description` field if the type declares it. + ..."description" in procDef ? { description: procDef.description } : {}, + type: procDef.type + } + ]; + } + return [ + procName, + { + init: Strict(procDef.requestInit), + output: Strict(procDef.responseData), + errors: getSerializedProcErrors(procDef), + // Only add `description` field if the type declares it. + ..."description" in procDef ? { description: procDef.description } : {}, + type: procDef.type, + input: Strict(procDef.requestData) + } + ]; + } + ) + ) + }; + } + /** + * Instantiates this schema into a {@link Service} object. + * + * You probably don't need this, usually the River server will handle this + * for you. + */ + instantiate(extendedContext) { + const state = this.initializeState(extendedContext); + const dispose = async () => { + await state[Symbol.asyncDispose]?.(); + state[Symbol.dispose]?.(); + }; + return Object.freeze({ + state, + procedures: this.procedures, + [Symbol.asyncDispose]: dispose + }); + } + }; +} +function getSerializedProcErrors(procDef) { + if (!("responseError" in procDef) || procDef.responseError[Kind2] === "Never") { + return Strict(ReaderErrorSchema); + } + const withProtocolErrors = flattenErrorType( + Type2.Union([procDef.responseError, ReaderErrorSchema]) + ); + return Strict(withProtocolErrors); +} +var ServiceScaffold = class { + /** + * The configuration for this service. + */ + config; + /** + * @param config - The configuration for this service. + */ + constructor(config) { + this.config = config; + } + /** + * Define procedures for this service. Use the {@link Procedure} constructors + * to create them. This returns the procedures object, which can then be + * passed to {@link ServiceSchema.finalize} to create a {@link ServiceSchema}. 
+ * + * @example + * ``` + * const myProcedures = MyServiceScaffold.procedures({ + * myRPC: Procedure.rpc({ + * // ... + * }), + * }); + * + * const MyService = MyServiceScaffold.finalize({ + * ...myProcedures, + * }); + * ``` + * + * @param procedures - The procedures for this service. + */ + procedures(procedures) { + return procedures; + } + /** + * Finalizes the scaffold into a {@link ServiceSchema}. This is where you + * provide the service's procedures and get a {@link ServiceSchema} in return. + * + * You can directly define procedures here, or you can define them separately + * with the {@link ServiceScaffold.procedures} method, and then pass them here. + * + * @example + * ``` + * const MyService = MyServiceScaffold.finalize({ + * myRPC: Procedure.rpc({ + * // ... + * }), + * // e.g. from the procedures method + * ...myOtherProcedures, + * }); + * ``` + */ + finalize(procedures) { + return createServiceSchema().define( + this.config, + procedures + ); + } +}; + +// router/result.ts +import { Type as Type3 } from "@sinclair/typebox"; +var AnyResultSchema = Type3.Union([ + Type3.Object({ + ok: Type3.Literal(false), + payload: Type3.Object({ + code: Type3.String(), + message: Type3.String(), + extras: Type3.Optional(Type3.Unknown()) + }) + }), + Type3.Object({ + ok: Type3.Literal(true), + payload: Type3.Unknown() + }) +]); +function Ok(payload) { + return { + ok: true, + payload + }; +} +function Err(error) { + return { + ok: false, + payload: error + }; +} + +// router/procedures.ts +import { Type as Type4 } from "@sinclair/typebox"; +function rpc({ + requestInit, + responseData, + responseError = Type4.Never(), + description, + handler +}) { + return { + ...description ? { description } : {}, + type: "rpc", + requestInit, + responseData, + responseError, + handler + }; +} +function upload({ + requestInit, + requestData, + responseData, + responseError = Type4.Never(), + description, + handler +}) { + return { + type: "upload", + ...description ? 
{ description } : {}, + requestInit, + requestData, + responseData, + responseError, + handler + }; +} +function subscription({ + requestInit, + responseData, + responseError = Type4.Never(), + description, + handler +}) { + return { + type: "subscription", + ...description ? { description } : {}, + requestInit, + responseData, + responseError, + handler + }; +} +function stream({ + requestInit, + requestData, + responseData, + responseError = Type4.Never(), + description, + handler +}) { + return { + type: "stream", + ...description ? { description } : {}, + requestInit, + requestData, + responseData, + responseError, + handler + }; +} +var Procedure = { + rpc, + upload, + subscription, + stream +}; + +// python-client/tests/extract_test_schema.ts +import { Type as Type5 } from "@sinclair/typebox"; +var ServiceSchema = createServiceSchema(); +var count = 0; +var TestServiceSchema = ServiceSchema.define({ + add: Procedure.rpc({ + requestInit: Type5.Object({ n: Type5.Number() }), + responseData: Type5.Object({ result: Type5.Number() }), + responseError: Type5.Never(), + async handler({ reqInit }) { + count += reqInit.n; + return Ok({ result: count }); + } + }), + echo: Procedure.stream({ + requestInit: Type5.Object({}), + requestData: Type5.Object({ + msg: Type5.String(), + ignore: Type5.Optional(Type5.Boolean()) + }), + responseData: Type5.Object({ response: Type5.String() }), + responseError: Type5.Never(), + async handler({ reqReadable, resWritable }) { + for await (const result of reqReadable) { + if (!result.ok) break; + const val = result.payload; + if (val.ignore) continue; + resWritable.write(Ok({ response: val.msg })); + } + resWritable.close(); + } + }), + echoWithPrefix: Procedure.stream({ + requestInit: Type5.Object({ prefix: Type5.String() }), + requestData: Type5.Object({ + msg: Type5.String(), + ignore: Type5.Optional(Type5.Boolean()) + }), + responseData: Type5.Object({ response: Type5.String() }), + responseError: Type5.Never(), + async handler({ 
reqInit, reqReadable, resWritable }) { + for await (const result of reqReadable) { + if (!result.ok) break; + const val = result.payload; + if (val.ignore) continue; + resWritable.write(Ok({ response: `${reqInit.prefix} ${val.msg}` })); + } + resWritable.close(); + } + }) +}); +var msgs = []; +var OrderingServiceSchema = ServiceSchema.define({ + add: Procedure.rpc({ + requestInit: Type5.Object({ n: Type5.Number() }), + responseData: Type5.Object({ n: Type5.Number() }), + responseError: Type5.Never(), + async handler({ reqInit }) { + msgs.push(reqInit.n); + return Ok({ n: reqInit.n }); + } + }), + getAll: Procedure.rpc({ + requestInit: Type5.Object({}), + responseData: Type5.Object({ msgs: Type5.Array(Type5.Number()) }), + responseError: Type5.Never(), + async handler() { + return Ok({ msgs: [...msgs] }); + } + }) +}); +var FallibleServiceSchema = ServiceSchema.define({ + divide: Procedure.rpc({ + requestInit: Type5.Object({ a: Type5.Number(), b: Type5.Number() }), + responseData: Type5.Object({ result: Type5.Number() }), + responseError: Type5.Union([ + Type5.Object({ + code: Type5.Literal("DIV_BY_ZERO"), + message: Type5.String() + }), + Type5.Object({ + code: Type5.Literal("INFINITY"), + message: Type5.String() + }) + ]), + async handler({ reqInit }) { + if (reqInit.b === 0) { + return Err({ code: "DIV_BY_ZERO", message: "Cannot divide by zero" }); + } + const result = reqInit.a / reqInit.b; + if (!isFinite(result)) { + return Err({ code: "INFINITY", message: "Result is infinity" }); + } + return Ok({ result }); + } + }), + echo: Procedure.stream({ + requestInit: Type5.Object({}), + requestData: Type5.Object({ + msg: Type5.String(), + throwResult: Type5.Optional(Type5.Boolean()), + throwError: Type5.Optional(Type5.Boolean()) + }), + responseData: Type5.Object({ response: Type5.String() }), + responseError: Type5.Object({ + code: Type5.Literal("STREAM_ERROR"), + message: Type5.String() + }), + async handler({ reqReadable, resWritable }) { + for await (const result 
of reqReadable) { + if (!result.ok) break; + const val = result.payload; + if (val.throwError) throw new Error("uncaught error"); + if (val.throwResult) { + resWritable.write(Err({ code: "STREAM_ERROR", message: "stream error" })); + continue; + } + resWritable.write(Ok({ response: val.msg })); + } + resWritable.close(); + } + }) +}); +var subCount = 0; +var subListeners = /* @__PURE__ */ new Set(); +var SubscribableServiceSchema = ServiceSchema.define({ + add: Procedure.rpc({ + requestInit: Type5.Object({ n: Type5.Number() }), + responseData: Type5.Object({ result: Type5.Number() }), + responseError: Type5.Never(), + async handler({ reqInit }) { + subCount += reqInit.n; + for (const l of subListeners) l(subCount); + return Ok({ result: subCount }); + } + }), + value: Procedure.subscription({ + requestInit: Type5.Object({}), + responseData: Type5.Object({ count: Type5.Number() }), + responseError: Type5.Never(), + async handler({ resWritable, ctx }) { + resWritable.write(Ok({ count: subCount })); + const listener = (val) => { + resWritable.write(Ok({ count: val })); + }; + subListeners.add(listener); + ctx.signal.addEventListener("abort", () => { + subListeners.delete(listener); + resWritable.close(); + }); + } + }) +}); +var UploadableServiceSchema = ServiceSchema.define({ + addMultiple: Procedure.upload({ + requestInit: Type5.Object({}), + requestData: Type5.Object({ n: Type5.Number() }), + responseData: Type5.Object({ result: Type5.Number() }), + responseError: Type5.Never(), + async handler({ reqReadable }) { + let total = 0; + for await (const result of reqReadable) { + if (!result.ok) break; + total += result.payload.n; + } + return Ok({ result: total }); + } + }), + addMultipleWithPrefix: Procedure.upload({ + requestInit: Type5.Object({ prefix: Type5.String() }), + requestData: Type5.Object({ n: Type5.Number() }), + responseData: Type5.Object({ result: Type5.String() }), + responseError: Type5.Never(), + async handler({ reqInit, reqReadable }) { + let total 
= 0; + for await (const result of reqReadable) { + if (!result.ok) break; + total += result.payload.n; + } + return Ok({ result: `${reqInit.prefix} ${total}` }); + } + }), + cancellableAdd: Procedure.upload({ + requestInit: Type5.Object({}), + requestData: Type5.Object({ n: Type5.Number() }), + responseData: Type5.Object({ result: Type5.Number() }), + responseError: Type5.Object({ + code: Type5.Literal("CANCEL"), + message: Type5.String() + }), + async handler({ reqReadable, ctx }) { + let total = 0; + for await (const result of reqReadable) { + if (!result.ok) break; + total += result.payload.n; + if (total >= 10) { + ctx.cancel(); + return Err({ code: "CANCEL", message: "total exceeds limit" }); + } + } + return Ok({ result: total }); + } + }) +}); +var CancellationServiceSchema = ServiceSchema.define({ + blockingRpc: Procedure.rpc({ + requestInit: Type5.Object({}), + responseData: Type5.Object({}), + responseError: Type5.Never(), + async handler({ ctx }) { + return new Promise((_resolve) => { + ctx.signal.addEventListener("abort", () => { + }); + }); + } + }), + blockingStream: Procedure.stream({ + requestInit: Type5.Object({}), + requestData: Type5.Object({}), + responseData: Type5.Object({}), + responseError: Type5.Never(), + async handler(_ctx) { + return new Promise(() => { + }); + } + }), + blockingUpload: Procedure.upload({ + requestInit: Type5.Object({}), + requestData: Type5.Object({}), + responseData: Type5.Object({}), + responseError: Type5.Never(), + async handler(_ctx) { + return new Promise(() => { + }); + } + }), + blockingSubscription: Procedure.subscription({ + requestInit: Type5.Object({}), + responseData: Type5.Object({}), + responseError: Type5.Never(), + async handler(_ctx) { + return new Promise(() => { + }); + } + }), + immediateRpc: Procedure.rpc({ + requestInit: Type5.Object({}), + responseData: Type5.Object({ done: Type5.Boolean() }), + responseError: Type5.Never(), + async handler() { + return Ok({ done: true }); + } + }), + 
immediateStream: Procedure.stream({ + requestInit: Type5.Object({}), + requestData: Type5.Object({}), + responseData: Type5.Object({ done: Type5.Boolean() }), + responseError: Type5.Never(), + async handler({ reqReadable, resWritable }) { + resWritable.write(Ok({ done: true })); + for await (const result of reqReadable) { + if (!result.ok) break; + } + resWritable.close(); + } + }), + immediateUpload: Procedure.upload({ + requestInit: Type5.Object({}), + requestData: Type5.Object({}), + responseData: Type5.Object({ done: Type5.Boolean() }), + responseError: Type5.Never(), + async handler({ reqReadable }) { + for await (const result of reqReadable) { + if (!result.ok) break; + } + return Ok({ done: true }); + } + }), + immediateSubscription: Procedure.subscription({ + requestInit: Type5.Object({}), + responseData: Type5.Object({ done: Type5.Boolean() }), + responseError: Type5.Never(), + async handler({ resWritable }) { + resWritable.write(Ok({ done: true })); + resWritable.close(); + } + }), + countedStream: Procedure.stream({ + requestInit: Type5.Object({ total: Type5.Number() }), + requestData: Type5.Object({}), + responseData: Type5.Object({ i: Type5.Number() }), + responseError: Type5.Never(), + async handler({ reqInit, reqReadable, resWritable }) { + for (let i = 0; i < reqInit.total; i++) { + resWritable.write(Ok({ i })); + } + for await (const result of reqReadable) { + if (!result.ok) break; + } + resWritable.close(); + } + }) +}); +var services = { + test: TestServiceSchema, + ordering: OrderingServiceSchema, + fallible: FallibleServiceSchema, + subscribable: SubscribableServiceSchema, + uploadable: UploadableServiceSchema, + cancel: CancellationServiceSchema +}; +var schema = serializeSchema(services); +var outPath = path.join(path.dirname(new URL(import.meta.url).pathname), "test_schema.json"); +fs.writeFileSync(outPath, JSON.stringify(schema, null, 2)); +console.log(`Wrote schema to ${outPath}`); diff --git a/python-client/tests/extract_test_schema.ts 
b/python-client/tests/extract_test_schema.ts new file mode 100644 index 00000000..e36fef81 --- /dev/null +++ b/python-client/tests/extract_test_schema.ts @@ -0,0 +1,361 @@ +/** + * Extract the test server schema to a JSON file for codegen tests. + * + * Usage (from river repo root): + * node python-client/tests/extract_test_schema.mjs + * + * Outputs: python-client/tests/test_schema.json + */ +import fs from 'node:fs'; +import path from 'node:path'; +import { + createServiceSchema, + Procedure, + Ok, + Err, + serializeSchema, +} from '../../router'; +import { Type } from '@sinclair/typebox'; + +const ServiceSchema = createServiceSchema(); + +// ------------------------------------------------------------------- +// TestService +// ------------------------------------------------------------------- +let count = 0; + +const TestServiceSchema = ServiceSchema.define({ + add: Procedure.rpc({ + requestInit: Type.Object({ n: Type.Number() }), + responseData: Type.Object({ result: Type.Number() }), + responseError: Type.Never(), + async handler({ reqInit }) { + count += reqInit.n; + return Ok({ result: count }); + }, + }), + echo: Procedure.stream({ + requestInit: Type.Object({}), + requestData: Type.Object({ + msg: Type.String(), + ignore: Type.Optional(Type.Boolean()), + }), + responseData: Type.Object({ response: Type.String() }), + responseError: Type.Never(), + async handler({ reqReadable, resWritable }) { + for await (const result of reqReadable) { + if (!result.ok) break; + const val = result.payload; + if (val.ignore) continue; + resWritable.write(Ok({ response: val.msg })); + } + resWritable.close(); + }, + }), + echoWithPrefix: Procedure.stream({ + requestInit: Type.Object({ prefix: Type.String() }), + requestData: Type.Object({ + msg: Type.String(), + ignore: Type.Optional(Type.Boolean()), + }), + responseData: Type.Object({ response: Type.String() }), + responseError: Type.Never(), + async handler({ reqInit, reqReadable, resWritable }) { + for await (const 
result of reqReadable) { + if (!result.ok) break; + const val = result.payload; + if (val.ignore) continue; + resWritable.write(Ok({ response: `${reqInit.prefix} ${val.msg}` })); + } + resWritable.close(); + }, + }), +}); + +// ------------------------------------------------------------------- +// OrderingService +// ------------------------------------------------------------------- +const msgs: Array = []; + +const OrderingServiceSchema = ServiceSchema.define({ + add: Procedure.rpc({ + requestInit: Type.Object({ n: Type.Number() }), + responseData: Type.Object({ n: Type.Number() }), + responseError: Type.Never(), + async handler({ reqInit }) { + msgs.push(reqInit.n); + return Ok({ n: reqInit.n }); + }, + }), + getAll: Procedure.rpc({ + requestInit: Type.Object({}), + responseData: Type.Object({ msgs: Type.Array(Type.Number()) }), + responseError: Type.Never(), + async handler() { + return Ok({ msgs: [...msgs] }); + }, + }), +}); + +// ------------------------------------------------------------------- +// FallibleService +// ------------------------------------------------------------------- +const FallibleServiceSchema = ServiceSchema.define({ + divide: Procedure.rpc({ + requestInit: Type.Object({ a: Type.Number(), b: Type.Number() }), + responseData: Type.Object({ result: Type.Number() }), + responseError: Type.Union([ + Type.Object({ + code: Type.Literal('DIV_BY_ZERO'), + message: Type.String(), + }), + Type.Object({ + code: Type.Literal('INFINITY'), + message: Type.String(), + }), + ]), + async handler({ reqInit }) { + if (reqInit.b === 0) { + return Err({ code: 'DIV_BY_ZERO' as const, message: 'Cannot divide by zero' }); + } + const result = reqInit.a / reqInit.b; + if (!isFinite(result)) { + return Err({ code: 'INFINITY' as const, message: 'Result is infinity' }); + } + return Ok({ result }); + }, + }), + echo: Procedure.stream({ + requestInit: Type.Object({}), + requestData: Type.Object({ + msg: Type.String(), + throwResult: Type.Optional(Type.Boolean()), 
+ throwError: Type.Optional(Type.Boolean()), + }), + responseData: Type.Object({ response: Type.String() }), + responseError: Type.Object({ + code: Type.Literal('STREAM_ERROR'), + message: Type.String(), + }), + async handler({ reqReadable, resWritable }) { + for await (const result of reqReadable) { + if (!result.ok) break; + const val = result.payload; + if (val.throwError) throw new Error('uncaught error'); + if (val.throwResult) { + resWritable.write(Err({ code: 'STREAM_ERROR' as const, message: 'stream error' })); + continue; + } + resWritable.write(Ok({ response: val.msg })); + } + resWritable.close(); + }, + }), +}); + +// ------------------------------------------------------------------- +// SubscribableService +// ------------------------------------------------------------------- +let subCount = 0; +type SubListener = (val: number) => void; +const subListeners = new Set(); + +const SubscribableServiceSchema = ServiceSchema.define({ + add: Procedure.rpc({ + requestInit: Type.Object({ n: Type.Number() }), + responseData: Type.Object({ result: Type.Number() }), + responseError: Type.Never(), + async handler({ reqInit }) { + subCount += reqInit.n; + for (const l of subListeners) l(subCount); + return Ok({ result: subCount }); + }, + }), + value: Procedure.subscription({ + requestInit: Type.Object({}), + responseData: Type.Object({ count: Type.Number() }), + responseError: Type.Never(), + async handler({ resWritable, ctx }) { + resWritable.write(Ok({ count: subCount })); + const listener: SubListener = (val) => { + resWritable.write(Ok({ count: val })); + }; + subListeners.add(listener); + ctx.signal.addEventListener('abort', () => { + subListeners.delete(listener); + resWritable.close(); + }); + }, + }), +}); + +// ------------------------------------------------------------------- +// UploadableService +// ------------------------------------------------------------------- +const UploadableServiceSchema = ServiceSchema.define({ + addMultiple: 
Procedure.upload({ + requestInit: Type.Object({}), + requestData: Type.Object({ n: Type.Number() }), + responseData: Type.Object({ result: Type.Number() }), + responseError: Type.Never(), + async handler({ reqReadable }) { + let total = 0; + for await (const result of reqReadable) { + if (!result.ok) break; + total += result.payload.n; + } + return Ok({ result: total }); + }, + }), + addMultipleWithPrefix: Procedure.upload({ + requestInit: Type.Object({ prefix: Type.String() }), + requestData: Type.Object({ n: Type.Number() }), + responseData: Type.Object({ result: Type.String() }), + responseError: Type.Never(), + async handler({ reqInit, reqReadable }) { + let total = 0; + for await (const result of reqReadable) { + if (!result.ok) break; + total += result.payload.n; + } + return Ok({ result: `${reqInit.prefix} ${total}` }); + }, + }), + cancellableAdd: Procedure.upload({ + requestInit: Type.Object({}), + requestData: Type.Object({ n: Type.Number() }), + responseData: Type.Object({ result: Type.Number() }), + responseError: Type.Object({ + code: Type.Literal('CANCEL'), + message: Type.String(), + }), + async handler({ reqReadable, ctx }) { + let total = 0; + for await (const result of reqReadable) { + if (!result.ok) break; + total += result.payload.n; + if (total >= 10) { + ctx.cancel(); + return Err({ code: 'CANCEL' as const, message: 'total exceeds limit' }); + } + } + return Ok({ result: total }); + }, + }), +}); + +// ------------------------------------------------------------------- +// CancellationService +// ------------------------------------------------------------------- +const CancellationServiceSchema = ServiceSchema.define({ + blockingRpc: Procedure.rpc({ + requestInit: Type.Object({}), + responseData: Type.Object({}), + responseError: Type.Never(), + async handler({ ctx }) { + return new Promise((_resolve) => { + ctx.signal.addEventListener('abort', () => {}); + }); + }, + }), + blockingStream: Procedure.stream({ + requestInit: Type.Object({}), + 
requestData: Type.Object({}), + responseData: Type.Object({}), + responseError: Type.Never(), + async handler(_ctx) { + return new Promise(() => {}); + }, + }), + blockingUpload: Procedure.upload({ + requestInit: Type.Object({}), + requestData: Type.Object({}), + responseData: Type.Object({}), + responseError: Type.Never(), + async handler(_ctx) { + return new Promise(() => {}); + }, + }), + blockingSubscription: Procedure.subscription({ + requestInit: Type.Object({}), + responseData: Type.Object({}), + responseError: Type.Never(), + async handler(_ctx) { + return new Promise(() => {}); + }, + }), + immediateRpc: Procedure.rpc({ + requestInit: Type.Object({}), + responseData: Type.Object({ done: Type.Boolean() }), + responseError: Type.Never(), + async handler() { + return Ok({ done: true }); + }, + }), + immediateStream: Procedure.stream({ + requestInit: Type.Object({}), + requestData: Type.Object({}), + responseData: Type.Object({ done: Type.Boolean() }), + responseError: Type.Never(), + async handler({ reqReadable, resWritable }) { + resWritable.write(Ok({ done: true })); + for await (const result of reqReadable) { + if (!result.ok) break; + } + resWritable.close(); + }, + }), + immediateUpload: Procedure.upload({ + requestInit: Type.Object({}), + requestData: Type.Object({}), + responseData: Type.Object({ done: Type.Boolean() }), + responseError: Type.Never(), + async handler({ reqReadable }) { + for await (const result of reqReadable) { + if (!result.ok) break; + } + return Ok({ done: true }); + }, + }), + immediateSubscription: Procedure.subscription({ + requestInit: Type.Object({}), + responseData: Type.Object({ done: Type.Boolean() }), + responseError: Type.Never(), + async handler({ resWritable }) { + resWritable.write(Ok({ done: true })); + resWritable.close(); + }, + }), + countedStream: Procedure.stream({ + requestInit: Type.Object({ total: Type.Number() }), + requestData: Type.Object({}), + responseData: Type.Object({ i: Type.Number() }), + 
responseError: Type.Never(), + async handler({ reqInit, reqReadable, resWritable }) { + for (let i = 0; i < reqInit.total; i++) { + resWritable.write(Ok({ i })); + } + for await (const result of reqReadable) { + if (!result.ok) break; + } + resWritable.close(); + }, + }), +}); + +// ------------------------------------------------------------------- +// Serialize and write +// ------------------------------------------------------------------- +const services = { + test: TestServiceSchema, + ordering: OrderingServiceSchema, + fallible: FallibleServiceSchema, + subscribable: SubscribableServiceSchema, + uploadable: UploadableServiceSchema, + cancel: CancellationServiceSchema, +}; + +const schema = serializeSchema(services); +const outPath = path.join(path.dirname(new URL(import.meta.url).pathname), 'test_schema.json'); +fs.writeFileSync(outPath, JSON.stringify(schema, null, 2)); +console.log(`Wrote schema to ${outPath}`); diff --git a/python-client/tests/generated/__init__.py b/python-client/tests/generated/__init__.py new file mode 100644 index 00000000..0668cffd --- /dev/null +++ b/python-client/tests/generated/__init__.py @@ -0,0 +1,17 @@ +"""Generated River service clients.""" + +from .cancel_client import CancelClient +from .fallible_client import FallibleClient +from .ordering_client import OrderingClient +from .subscribable_client import SubscribableClient +from .test_client import TestClient +from .uploadable_client import UploadableClient + +__all__ = [ + "CancelClient", + "FallibleClient", + "OrderingClient", + "SubscribableClient", + "TestClient", + "UploadableClient", +] diff --git a/python-client/tests/generated/_errors.py b/python-client/tests/generated/_errors.py new file mode 100644 index 00000000..ba3e8c37 --- /dev/null +++ b/python-client/tests/generated/_errors.py @@ -0,0 +1,45 @@ +"""Protocol-level error types for the River protocol. + +These errors can be returned by any procedure regardless of its +service-specific error schema. 
+""" + +from __future__ import annotations + +from typing import Literal + +from typing_extensions import NotRequired, TypedDict + + +class UncaughtError(TypedDict): + code: Literal["UNCAUGHT_ERROR"] + message: str + + +class UnexpectedDisconnect(TypedDict): + code: Literal["UNEXPECTED_DISCONNECT"] + message: str + + +class InvalidRequestExtrasItem(TypedDict): + path: str + message: str + + +class InvalidRequestExtras(TypedDict): + firstValidationErrors: list[InvalidRequestExtrasItem] + totalErrors: float + + +class InvalidRequest(TypedDict): + code: Literal["INVALID_REQUEST"] + message: str + extras: NotRequired[InvalidRequestExtras] + + +class Cancel(TypedDict): + code: Literal["CANCEL"] + message: str + + +ProtocolError = UncaughtError | UnexpectedDisconnect | InvalidRequest | Cancel diff --git a/python-client/tests/generated/_types.py b/python-client/tests/generated/_types.py new file mode 100644 index 00000000..cac1af4f --- /dev/null +++ b/python-client/tests/generated/_types.py @@ -0,0 +1,239 @@ +"""Generated type definitions for River services.""" + +from __future__ import annotations + +from typing import Literal + +from typing_extensions import NotRequired, TypedDict + + +class TestAddInit(TypedDict): + n: float + + +class TestAddOutput(TypedDict): + result: float + + +class TestEchoInit(TypedDict): + pass + + +class TestEchoInput(TypedDict): + msg: str + ignore: NotRequired[bool] + + +class TestEchoOutput(TypedDict): + response: str + + +class TestEchoWithPrefixInit(TypedDict): + prefix: str + + +class TestEchoWithPrefixInput(TypedDict): + msg: str + ignore: NotRequired[bool] + + +class TestEchoWithPrefixOutput(TypedDict): + response: str + + +class OrderingAddInit(TypedDict): + n: float + + +class OrderingAddOutput(TypedDict): + n: float + + +class OrderingGetAllInit(TypedDict): + pass + + +class OrderingGetAllOutput(TypedDict): + msgs: list[float] + + +class FallibleDivideInit(TypedDict): + a: float + b: float + + +class FallibleDivideOutput(TypedDict): 
+ result: float + + +class FallibleDivideErrorDivByZero(TypedDict): + code: Literal["DIV_BY_ZERO"] + message: str + + +class FallibleDivideErrorInfinity(TypedDict): + code: Literal["INFINITY"] + message: str + + +class FallibleEchoInit(TypedDict): + pass + + +class FallibleEchoInput(TypedDict): + msg: str + throwResult: NotRequired[bool] + throwError: NotRequired[bool] + + +class FallibleEchoOutput(TypedDict): + response: str + + +class FallibleEchoError(TypedDict): + code: Literal["STREAM_ERROR"] + message: str + + +class SubscribableAddInit(TypedDict): + n: float + + +class SubscribableAddOutput(TypedDict): + result: float + + +class SubscribableValueInit(TypedDict): + pass + + +class SubscribableValueOutput(TypedDict): + count: float + + +class UploadableAddMultipleInit(TypedDict): + pass + + +class UploadableAddMultipleInput(TypedDict): + n: float + + +class UploadableAddMultipleOutput(TypedDict): + result: float + + +class UploadableAddMultipleWithPrefixInit(TypedDict): + prefix: str + + +class UploadableAddMultipleWithPrefixInput(TypedDict): + n: float + + +class UploadableAddMultipleWithPrefixOutput(TypedDict): + result: str + + +class UploadableCancellableAddInit(TypedDict): + pass + + +class UploadableCancellableAddInput(TypedDict): + n: float + + +class UploadableCancellableAddOutput(TypedDict): + result: float + + +class CancelBlockingRpcInit(TypedDict): + pass + + +class CancelBlockingRpcOutput(TypedDict): + pass + + +class CancelBlockingStreamInit(TypedDict): + pass + + +class CancelBlockingStreamInput(TypedDict): + pass + + +class CancelBlockingStreamOutput(TypedDict): + pass + + +class CancelBlockingUploadInit(TypedDict): + pass + + +class CancelBlockingUploadInput(TypedDict): + pass + + +class CancelBlockingUploadOutput(TypedDict): + pass + + +class CancelBlockingSubscriptionInit(TypedDict): + pass + + +class CancelBlockingSubscriptionOutput(TypedDict): + pass + + +class CancelImmediateRpcInit(TypedDict): + pass + + +class 
CancelImmediateRpcOutput(TypedDict): + done: bool + + +class CancelImmediateStreamInit(TypedDict): + pass + + +class CancelImmediateStreamInput(TypedDict): + pass + + +class CancelImmediateStreamOutput(TypedDict): + done: bool + + +class CancelImmediateUploadInit(TypedDict): + pass + + +class CancelImmediateUploadInput(TypedDict): + pass + + +class CancelImmediateUploadOutput(TypedDict): + done: bool + + +class CancelImmediateSubscriptionInit(TypedDict): + pass + + +class CancelImmediateSubscriptionOutput(TypedDict): + done: bool + + +class CancelCountedStreamInit(TypedDict): + total: float + + +class CancelCountedStreamInput(TypedDict): + pass + + +class CancelCountedStreamOutput(TypedDict): + i: float diff --git a/python-client/tests/generated/cancel_client.py b/python-client/tests/generated/cancel_client.py new file mode 100644 index 00000000..b580c8df --- /dev/null +++ b/python-client/tests/generated/cancel_client.py @@ -0,0 +1,264 @@ +"""Generated client for the cancel service.""" + +from __future__ import annotations + +import asyncio +from typing import Any + +from river.client import RiverClient +from river.streams import Readable, Writable + +from ._types import ( + CancelBlockingRpcInit, + CancelBlockingStreamInit, + CancelBlockingStreamInput, + CancelBlockingSubscriptionInit, + CancelBlockingUploadInit, + CancelBlockingUploadInput, + CancelCountedStreamInit, + CancelCountedStreamInput, + CancelImmediateRpcInit, + CancelImmediateStreamInit, + CancelImmediateStreamInput, + CancelImmediateSubscriptionInit, + CancelImmediateUploadInit, + CancelImmediateUploadInput, +) + + +class CancelBlockingStreamStreamResult: + """Streaming result for ``cancel.blockingStream``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[CancelBlockingStreamInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + 
"""Readable stream for receiving responses.""" + return self._inner.res_readable + + +class CancelBlockingUploadUploadResult: + """Upload result for ``cancel.blockingUpload``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[CancelBlockingUploadInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + async def finalize(self) -> dict[str, Any]: + """Finalize the upload and get the response.""" + return await self._inner.finalize() + + +class CancelBlockingSubscriptionSubscriptionResult: + """Subscription result for ``cancel.blockingSubscription``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable + + +class CancelImmediateStreamStreamResult: + """Streaming result for ``cancel.immediateStream``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[CancelImmediateStreamInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable + + +class CancelImmediateUploadUploadResult: + """Upload result for ``cancel.immediateUpload``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[CancelImmediateUploadInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + async def finalize(self) -> dict[str, Any]: + """Finalize the upload and get the response.""" + return await self._inner.finalize() + + +class CancelImmediateSubscriptionSubscriptionResult: + """Subscription result for ``cancel.immediateSubscription``.""" + + def __init__(self, inner: Any) -> None: + 
self._inner = inner + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable + + +class CancelCountedStreamStreamResult: + """Streaming result for ``cancel.countedStream``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[CancelCountedStreamInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable + + +class CancelClient: + """Typed client for the ``cancel`` service.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client + + async def blocking_rpc( + self, + init: CancelBlockingRpcInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + return await self._client.rpc( + "cancel", + "blockingRpc", + init, + abort_signal=abort_signal, + ) + + def blocking_stream( + self, + init: CancelBlockingStreamInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> CancelBlockingStreamStreamResult: + result = self._client.stream( + "cancel", + "blockingStream", + init, + abort_signal=abort_signal, + ) + return CancelBlockingStreamStreamResult(result) + + def blocking_upload( + self, + init: CancelBlockingUploadInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> CancelBlockingUploadUploadResult: + result = self._client.upload( + "cancel", + "blockingUpload", + init, + abort_signal=abort_signal, + ) + return CancelBlockingUploadUploadResult(result) + + def blocking_subscription( + self, + init: CancelBlockingSubscriptionInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> CancelBlockingSubscriptionSubscriptionResult: + result = self._client.subscribe( + "cancel", + "blockingSubscription", + init, + abort_signal=abort_signal, + ) + return 
CancelBlockingSubscriptionSubscriptionResult(result) + + async def immediate_rpc( + self, + init: CancelImmediateRpcInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + return await self._client.rpc( + "cancel", + "immediateRpc", + init, + abort_signal=abort_signal, + ) + + def immediate_stream( + self, + init: CancelImmediateStreamInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> CancelImmediateStreamStreamResult: + result = self._client.stream( + "cancel", + "immediateStream", + init, + abort_signal=abort_signal, + ) + return CancelImmediateStreamStreamResult(result) + + def immediate_upload( + self, + init: CancelImmediateUploadInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> CancelImmediateUploadUploadResult: + result = self._client.upload( + "cancel", + "immediateUpload", + init, + abort_signal=abort_signal, + ) + return CancelImmediateUploadUploadResult(result) + + def immediate_subscription( + self, + init: CancelImmediateSubscriptionInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> CancelImmediateSubscriptionSubscriptionResult: + result = self._client.subscribe( + "cancel", + "immediateSubscription", + init, + abort_signal=abort_signal, + ) + return CancelImmediateSubscriptionSubscriptionResult(result) + + def counted_stream( + self, + init: CancelCountedStreamInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> CancelCountedStreamStreamResult: + result = self._client.stream( + "cancel", + "countedStream", + init, + abort_signal=abort_signal, + ) + return CancelCountedStreamStreamResult(result) diff --git a/python-client/tests/generated/conftest.py b/python-client/tests/generated/conftest.py new file mode 100644 index 00000000..37d9ad16 --- /dev/null +++ b/python-client/tests/generated/conftest.py @@ -0,0 +1 @@ +collect_ignore_glob = ["*"] diff --git a/python-client/tests/generated/fallible_client.py b/python-client/tests/generated/fallible_client.py new file mode 100644 index 
00000000..9c1eb493 --- /dev/null +++ b/python-client/tests/generated/fallible_client.py @@ -0,0 +1,66 @@ +"""Generated client for the fallible service.""" + +from __future__ import annotations + +import asyncio +from typing import Any + +from river.client import RiverClient +from river.streams import Readable, Writable + +from ._types import ( + FallibleDivideInit, + FallibleEchoInit, + FallibleEchoInput, +) + + +class FallibleEchoStreamResult: + """Streaming result for ``fallible.echo``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[FallibleEchoInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable + + +class FallibleClient: + """Typed client for the ``fallible`` service.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client + + async def divide( + self, + init: FallibleDivideInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + return await self._client.rpc( + "fallible", + "divide", + init, + abort_signal=abort_signal, + ) + + def echo( + self, + init: FallibleEchoInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> FallibleEchoStreamResult: + result = self._client.stream( + "fallible", + "echo", + init, + abort_signal=abort_signal, + ) + return FallibleEchoStreamResult(result) diff --git a/python-client/tests/generated/ordering_client.py b/python-client/tests/generated/ordering_client.py new file mode 100644 index 00000000..79adbe5e --- /dev/null +++ b/python-client/tests/generated/ordering_client.py @@ -0,0 +1,46 @@ +"""Generated client for the ordering service.""" + +from __future__ import annotations + +import asyncio +from typing import Any + +from river.client import RiverClient + +from ._types import ( + OrderingAddInit, + 
OrderingGetAllInit, +) + + +class OrderingClient: + """Typed client for the ``ordering`` service.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client + + async def add( + self, + init: OrderingAddInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + return await self._client.rpc( + "ordering", + "add", + init, + abort_signal=abort_signal, + ) + + async def get_all( + self, + init: OrderingGetAllInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + return await self._client.rpc( + "ordering", + "getAll", + init, + abort_signal=abort_signal, + ) diff --git a/python-client/tests/generated/subscribable_client.py b/python-client/tests/generated/subscribable_client.py new file mode 100644 index 00000000..ccb1abe8 --- /dev/null +++ b/python-client/tests/generated/subscribable_client.py @@ -0,0 +1,60 @@ +"""Generated client for the subscribable service.""" + +from __future__ import annotations + +import asyncio +from typing import Any + +from river.client import RiverClient +from river.streams import Readable + +from ._types import ( + SubscribableAddInit, + SubscribableValueInit, +) + + +class SubscribableValueSubscriptionResult: + """Subscription result for ``subscribable.value``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable + + +class SubscribableClient: + """Typed client for the ``subscribable`` service.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client + + async def add( + self, + init: SubscribableAddInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + return await self._client.rpc( + "subscribable", + "add", + init, + abort_signal=abort_signal, + ) + + def value( + self, + init: SubscribableValueInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> 
SubscribableValueSubscriptionResult: + result = self._client.subscribe( + "subscribable", + "value", + init, + abort_signal=abort_signal, + ) + return SubscribableValueSubscriptionResult(result) diff --git a/python-client/tests/generated/test_client.py b/python-client/tests/generated/test_client.py new file mode 100644 index 00000000..57482d0b --- /dev/null +++ b/python-client/tests/generated/test_client.py @@ -0,0 +1,99 @@ +"""Generated client for the test service.""" + +from __future__ import annotations + +import asyncio +from typing import Any + +from river.client import RiverClient +from river.streams import Readable, Writable + +from ._types import ( + TestAddInit, + TestEchoInit, + TestEchoInput, + TestEchoWithPrefixInit, + TestEchoWithPrefixInput, +) + + +class TestEchoStreamResult: + """Streaming result for ``test.echo``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[TestEchoInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable + + +class TestEchoWithPrefixStreamResult: + """Streaming result for ``test.echoWithPrefix``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[TestEchoWithPrefixInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + @property + def res_readable(self) -> Readable[dict[str, Any]]: + """Readable stream for receiving responses.""" + return self._inner.res_readable + + +class TestClient: + """Typed client for the ``test`` service.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client + + async def add( + self, + init: TestAddInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + return await self._client.rpc( + "test", + 
"add", + init, + abort_signal=abort_signal, + ) + + def echo( + self, + init: TestEchoInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> TestEchoStreamResult: + result = self._client.stream( + "test", + "echo", + init, + abort_signal=abort_signal, + ) + return TestEchoStreamResult(result) + + def echo_with_prefix( + self, + init: TestEchoWithPrefixInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> TestEchoWithPrefixStreamResult: + result = self._client.stream( + "test", + "echoWithPrefix", + init, + abort_signal=abort_signal, + ) + return TestEchoWithPrefixStreamResult(result) diff --git a/python-client/tests/generated/uploadable_client.py b/python-client/tests/generated/uploadable_client.py new file mode 100644 index 00000000..1c6b4737 --- /dev/null +++ b/python-client/tests/generated/uploadable_client.py @@ -0,0 +1,115 @@ +"""Generated client for the uploadable service.""" + +from __future__ import annotations + +import asyncio +from typing import Any + +from river.client import RiverClient +from river.streams import Writable + +from ._types import ( + UploadableAddMultipleInit, + UploadableAddMultipleInput, + UploadableAddMultipleWithPrefixInit, + UploadableAddMultipleWithPrefixInput, + UploadableCancellableAddInit, + UploadableCancellableAddInput, +) + + +class UploadableAddMultipleUploadResult: + """Upload result for ``uploadable.addMultiple``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[UploadableAddMultipleInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + async def finalize(self) -> dict[str, Any]: + """Finalize the upload and get the response.""" + return await self._inner.finalize() + + +class UploadableAddMultipleWithPrefixUploadResult: + """Upload result for ``uploadable.addMultipleWithPrefix``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> 
Writable[UploadableAddMultipleWithPrefixInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + async def finalize(self) -> dict[str, Any]: + """Finalize the upload and get the response.""" + return await self._inner.finalize() + + +class UploadableCancellableAddUploadResult: + """Upload result for ``uploadable.cancellableAdd``.""" + + def __init__(self, inner: Any) -> None: + self._inner = inner + + @property + def req_writable(self) -> Writable[UploadableCancellableAddInput]: + """Writable stream for sending requests.""" + return self._inner.req_writable + + async def finalize(self) -> dict[str, Any]: + """Finalize the upload and get the response.""" + return await self._inner.finalize() + + +class UploadableClient: + """Typed client for the ``uploadable`` service.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client + + def add_multiple( + self, + init: UploadableAddMultipleInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> UploadableAddMultipleUploadResult: + result = self._client.upload( + "uploadable", + "addMultiple", + init, + abort_signal=abort_signal, + ) + return UploadableAddMultipleUploadResult(result) + + def add_multiple_with_prefix( + self, + init: UploadableAddMultipleWithPrefixInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> UploadableAddMultipleWithPrefixUploadResult: + result = self._client.upload( + "uploadable", + "addMultipleWithPrefix", + init, + abort_signal=abort_signal, + ) + return UploadableAddMultipleWithPrefixUploadResult(result) + + def cancellable_add( + self, + init: UploadableCancellableAddInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> UploadableCancellableAddUploadResult: + result = self._client.upload( + "uploadable", + "cancellableAdd", + init, + abort_signal=abort_signal, + ) + return UploadableCancellableAddUploadResult(result) diff --git a/python-client/tests/test_codegen.py b/python-client/tests/test_codegen.py new file 
mode 100644 index 00000000..f4020f75 --- /dev/null +++ b/python-client/tests/test_codegen.py @@ -0,0 +1,383 @@ +"""Tests for River codegen pipeline. + +Tests the full pipeline: schema extraction → codegen → import → live usage. +""" + +from __future__ import annotations + +import json +import os +import sys + +import pytest + +TESTS_DIR = os.path.dirname(__file__) +SCHEMA_JSON = os.path.join(TESTS_DIR, "test_schema.json") +GENERATED_DIR = os.path.join(TESTS_DIR, "generated") + + +# --------------------------------------------------------------------------- +# Schema conversion tests +# --------------------------------------------------------------------------- + + +class TestSchemaConversion: + """Test JSON Schema → IR conversion.""" + + @pytest.fixture(autouse=True) + def _setup(self, generated_client_dir: str) -> None: + """Ensure codegen has run.""" + + def _load_schema(self) -> dict: + with open(SCHEMA_JSON) as f: + return json.load(f) + + def test_schema_has_services(self) -> None: + schema = self._load_schema() + assert "services" in schema + svc_names = set(schema["services"].keys()) + assert svc_names == { + "test", + "ordering", + "fallible", + "subscribable", + "uploadable", + "cancel", + } + + def test_converter_produces_ir(self) -> None: + from river.codegen.schema import SchemaConverter + + schema = self._load_schema() + converter = SchemaConverter() + ir = converter.convert(schema) + + svc_names = {s.name for s in ir.services} + assert svc_names == { + "test", + "ordering", + "fallible", + "subscribable", + "uploadable", + "cancel", + } + + def test_test_service_procedures(self) -> None: + from river.codegen.schema import SchemaConverter + + schema = self._load_schema() + converter = SchemaConverter() + ir = converter.convert(schema) + + test_svc = next(s for s in ir.services if s.name == "test") + proc_names = {p.name for p in test_svc.procedures} + assert "add" in proc_names + assert "echo" in proc_names + assert "echoWithPrefix" in proc_names + + 
def test_procedure_types(self) -> None: + from river.codegen.schema import SchemaConverter + + schema = self._load_schema() + converter = SchemaConverter() + ir = converter.convert(schema) + + test_svc = next(s for s in ir.services if s.name == "test") + procs = {p.name: p for p in test_svc.procedures} + + assert procs["add"].proc_type == "rpc" + assert procs["echo"].proc_type == "stream" + assert procs["echoWithPrefix"].proc_type == "stream" + + def test_snake_case_method_names(self) -> None: + from river.codegen.schema import SchemaConverter + + schema = self._load_schema() + converter = SchemaConverter() + ir = converter.convert(schema) + + test_svc = next(s for s in ir.services if s.name == "test") + procs = {p.name: p for p in test_svc.procedures} + + assert procs["echoWithPrefix"].py_name == "echo_with_prefix" + assert procs["add"].py_name == "add" + + def test_typedicts_generated(self) -> None: + from river.codegen.schema import SchemaConverter + + schema = self._load_schema() + converter = SchemaConverter() + ir = converter.convert(schema) + + td_names = {td.name for td in ir.typedicts} + assert "TestAddInit" in td_names + assert "TestEchoInit" in td_names + assert "TestEchoInput" in td_names + assert "TestEchoWithPrefixInit" in td_names + + def test_fallible_service_errors(self) -> None: + from river.codegen.schema import SchemaConverter + + schema = self._load_schema() + converter = SchemaConverter() + ir = converter.convert(schema) + + fallible_svc = next(s for s in ir.services if s.name == "fallible") + divide_proc = next(p for p in fallible_svc.procedures if p.name == "divide") + + # Should have service-specific errors + assert divide_proc.error_type is not None + assert "DivByZero" in divide_proc.error_type.annotation + assert "Infinity" in divide_proc.error_type.annotation + + def test_upload_procedures(self) -> None: + from river.codegen.schema import SchemaConverter + + schema = self._load_schema() + converter = SchemaConverter() + ir = 
converter.convert(schema) + + upload_svc = next(s for s in ir.services if s.name == "uploadable") + procs = {p.name: p for p in upload_svc.procedures} + + assert procs["addMultiple"].proc_type == "upload" + assert procs["addMultiple"].input_type is not None + + def test_subscription_procedures(self) -> None: + from river.codegen.schema import SchemaConverter + + schema = self._load_schema() + converter = SchemaConverter() + ir = converter.convert(schema) + + sub_svc = next(s for s in ir.services if s.name == "subscribable") + procs = {p.name: p for p in sub_svc.procedures} + + assert procs["value"].proc_type == "subscription" + assert procs["value"].input_type is None + + +# --------------------------------------------------------------------------- +# Generated code import tests +# --------------------------------------------------------------------------- + + +class TestGeneratedImports: + """Test that generated code can be imported.""" + + @pytest.fixture(autouse=True) + def _setup(self, generated_client_dir: str) -> None: + """Ensure codegen has run and generated dir is on sys.path.""" + if TESTS_DIR not in sys.path: + sys.path.insert(0, TESTS_DIR) + + def test_import_init(self) -> None: + import generated + + assert hasattr(generated, "TestClient") + assert hasattr(generated, "FallibleClient") + assert hasattr(generated, "UploadableClient") + assert hasattr(generated, "SubscribableClient") + assert hasattr(generated, "OrderingClient") + assert hasattr(generated, "CancelClient") + + def test_import_types(self) -> None: + from generated._types import ( + TestAddInit, + TestEchoInit, + TestEchoInput, + TestEchoWithPrefixInit, + ) + + # TypedDicts should be classes + assert isinstance(TestAddInit, type) + assert isinstance(TestEchoInit, type) + assert isinstance(TestEchoInput, type) + assert isinstance(TestEchoWithPrefixInit, type) + + def test_import_errors(self) -> None: + from generated._errors import ( + Cancel, + InvalidRequest, + UncaughtError, + 
UnexpectedDisconnect, + ) + + assert isinstance(UncaughtError, type) + assert isinstance(UnexpectedDisconnect, type) + assert isinstance(InvalidRequest, type) + assert isinstance(Cancel, type) + + def test_client_class_has_methods(self) -> None: + from generated import TestClient + + assert hasattr(TestClient, "add") + assert hasattr(TestClient, "echo") + assert hasattr(TestClient, "echo_with_prefix") + + def test_fallible_client_has_methods(self) -> None: + from generated import FallibleClient + + assert hasattr(FallibleClient, "divide") + assert hasattr(FallibleClient, "echo") + + def test_uploadable_client_has_methods(self) -> None: + from generated import UploadableClient + + assert hasattr(UploadableClient, "add_multiple") + assert hasattr(UploadableClient, "add_multiple_with_prefix") + assert hasattr(UploadableClient, "cancellable_add") + + def test_subscribable_client_has_methods(self) -> None: + from generated import SubscribableClient + + assert hasattr(SubscribableClient, "add") + assert hasattr(SubscribableClient, "value") + + +# --------------------------------------------------------------------------- +# Live test server integration tests +# --------------------------------------------------------------------------- + + +class TestGeneratedClientsLive: + """Test generated proxy clients against the live test server.""" + + @pytest.fixture(autouse=True) + def _setup(self, generated_client_dir: str) -> None: + if TESTS_DIR not in sys.path: + sys.path.insert(0, TESTS_DIR) + + async def _make_client(self, server_url: str): + from river import ( + NaiveJsonCodec, + RiverClient, + WebSocketClientTransport, + ) + + transport = WebSocketClientTransport( + server_url, + client_id="test-codegen-client", + server_id="SERVER", + codec=NaiveJsonCodec(), + ) + client = RiverClient(transport, server_id="SERVER") + return client, transport + + async def test_rpc_via_generated_client(self, server_url: str) -> None: + from generated import TestClient + + client, 
transport = await self._make_client(server_url) + try: + test = TestClient(client) + result = await test.add({"n": 0}) + assert result["ok"] is True + assert isinstance(result["payload"]["result"], (int, float)) + finally: + await transport.close() + + async def test_stream_via_generated_client(self, server_url: str) -> None: + from generated import TestClient + + client, transport = await self._make_client(server_url) + try: + test = TestClient(client) + stream = test.echo({}) + + stream.req_writable.write({"msg": "hello", "ignore": False}) + stream.req_writable.write({"msg": "world", "ignore": False}) + stream.req_writable.close() + + messages = [] + async for msg in stream.res_readable: + if msg.get("ok"): + messages.append(msg["payload"]["response"]) + + assert "hello" in messages + assert "world" in messages + finally: + await transport.close() + + async def test_stream_with_prefix_via_generated_client( + self, server_url: str + ) -> None: + from generated import TestClient + + client, transport = await self._make_client(server_url) + try: + test = TestClient(client) + stream = test.echo_with_prefix({"prefix": ">>>"}) + + stream.req_writable.write({"msg": "test", "ignore": False}) + stream.req_writable.close() + + messages = [] + async for msg in stream.res_readable: + if msg.get("ok"): + messages.append(msg["payload"]["response"]) + + assert len(messages) == 1 + assert messages[0] == ">>> test" + finally: + await transport.close() + + async def test_upload_via_generated_client(self, server_url: str) -> None: + from generated import UploadableClient + + client, transport = await self._make_client(server_url) + try: + upload_client = UploadableClient(client) + upload = upload_client.add_multiple({}) + + upload.req_writable.write({"n": 1}) + upload.req_writable.write({"n": 2}) + upload.req_writable.write({"n": 3}) + upload.req_writable.close() + + result = await upload.finalize() + assert result["ok"] is True + assert result["payload"]["result"] == 6 + finally: 
+ await transport.close() + + async def test_subscription_via_generated_client(self, server_url: str) -> None: + from generated import SubscribableClient + + client, transport = await self._make_client(server_url) + try: + sub_client = SubscribableClient(client) + sub = sub_client.value({}) + + # Get the initial value + done, msg = await sub.res_readable.next() + assert not done + assert msg["ok"] is True + assert "count" in msg["payload"] + + sub.res_readable.break_() + finally: + await transport.close() + + async def test_fallible_rpc_success(self, server_url: str) -> None: + from generated import FallibleClient + + client, transport = await self._make_client(server_url) + try: + fallible = FallibleClient(client) + result = await fallible.divide({"a": 10, "b": 2}) + assert result["ok"] is True + assert result["payload"]["result"] == 5.0 + finally: + await transport.close() + + async def test_fallible_rpc_error(self, server_url: str) -> None: + from generated import FallibleClient + + client, transport = await self._make_client(server_url) + try: + fallible = FallibleClient(client) + result = await fallible.divide({"a": 10, "b": 0}) + assert result["ok"] is False + assert result["payload"]["code"] == "DIV_BY_ZERO" + finally: + await transport.close() diff --git a/python-client/tests/test_schema.json b/python-client/tests/test_schema.json new file mode 100644 index 00000000..e0c6bfa2 --- /dev/null +++ b/python-client/tests/test_schema.json @@ -0,0 +1,2654 @@ +{ + "services": { + "test": { + "procedures": { + "add": { + "init": { + "type": "object", + "properties": { + "n": { + "type": "number" + } + }, + "required": [ + "n" + ] + }, + "output": { + "type": "object", + "properties": { + "result": { + "type": "number" + } + }, + "required": [ + "result" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + 
"message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "rpc" + }, + "echo": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + "response": { + "type": "string" + } + }, + "required": [ + "response" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, 
+ "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "stream", + "input": { + "type": "object", + "properties": { + "msg": { + "type": "string" + }, + "ignore": { + "type": "boolean" + } + }, + "required": [ + "msg" + ] + } + }, + "echoWithPrefix": { + "init": { + "type": "object", + "properties": { + "prefix": { + "type": "string" + } + }, + "required": [ + "prefix" + ] + }, + "output": { + "type": "object", + "properties": { + "response": { + "type": "string" + } + }, + "required": [ + "response" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + 
"code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "stream", + "input": { + "type": "object", + "properties": { + "msg": { + "type": "string" + }, + "ignore": { + "type": "boolean" + } + }, + "required": [ + "msg" + ] + } + } + } + }, + "ordering": { + "procedures": { + "add": { + "init": { + "type": "object", + "properties": { + "n": { + "type": "number" + } + }, + "required": [ + "n" + ] + }, + "output": { + "type": "object", + "properties": { + "n": { + "type": "number" + } + }, + "required": [ + "n" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "rpc" + }, + "getAll": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + 
"msgs": { + "type": "array", + "items": { + "type": "number" + } + } + }, + "required": [ + "msgs" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "rpc" + } + } + }, + "fallible": { + "procedures": { + "divide": { + "init": { + "type": "object", + "properties": { + "a": { + "type": "number" + }, + "b": { + "type": "number" + } + }, + "required": [ + "a", + "b" + ] + }, + "output": { + "type": "object", + "properties": { + "result": { + "type": "number" + } + }, + "required": [ + "result" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "DIV_BY_ZERO", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INFINITY", + "type": 
"string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "rpc" + }, + "echo": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + "response": { + "type": "string" + } + }, + "required": [ + "response" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "STREAM_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + 
"const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "stream", + "input": { + "type": "object", + "properties": { + "msg": { + "type": "string" + }, + "throwResult": { + "type": "boolean" + }, + "throwError": { + "type": "boolean" + } + }, + "required": [ + "msg" + ] + } + } + } + }, + "subscribable": { + "procedures": { + "add": { + "init": { + "type": "object", + "properties": { + "n": { + "type": "number" + } + }, + "required": [ + "n" + ] + }, + "output": { + "type": "object", + "properties": { + "result": { + "type": "number" + } + }, + "required": [ + "result" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": 
"string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "rpc" + }, + "value": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + "count": { + "type": "number" + } + }, + "required": [ + "count" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + 
"const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "subscription" + } + } + }, + "uploadable": { + "procedures": { + "addMultiple": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + "result": { + "type": "number" + } + }, + "required": [ + "result" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "upload", + "input": { + "type": "object", + "properties": { + "n": { + "type": "number" + } + }, + "required": [ + "n" + ] + } + }, + "addMultipleWithPrefix": { + "init": { + "type": "object", + "properties": { + "prefix": { + "type": "string" + } + }, + "required": [ + "prefix" + ] + }, + "output": { + "type": "object", + "properties": { + 
"result": { + "type": "string" + } + }, + "required": [ + "result" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "upload", + "input": { + "type": "object", + "properties": { + "n": { + "type": "number" + } + }, + "required": [ + "n" + ] + } + }, + "cancellableAdd": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + "result": { + "type": "number" + } + }, + "required": [ + "result" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": 
"string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "upload", + "input": { + "type": "object", + "properties": { + "n": { + "type": "number" + } + }, + "required": [ + "n" + ] + } + } + } + }, + "cancel": { + "procedures": { + "blockingRpc": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": {} + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + 
"properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "rpc" + }, + "blockingStream": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": {} + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": 
"stream", + "input": { + "type": "object", + "properties": {} + } + }, + "blockingUpload": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": {} + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "upload", + "input": { + "type": "object", + "properties": {} + } + }, + "blockingSubscription": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": {} + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": 
{ + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "subscription" + }, + "immediateRpc": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + "done": { + "type": "boolean" + } + }, + "required": [ + "done" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } 
+ }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "rpc" + }, + "immediateStream": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + "done": { + "type": "boolean" + } + }, + "required": [ + "done" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "stream", + "input": { + "type": "object", + "properties": {} + } + }, + "immediateUpload": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + "done": { + 
"type": "boolean" + } + }, + "required": [ + "done" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "upload", + "input": { + "type": "object", + "properties": {} + } + }, + "immediateSubscription": { + "init": { + "type": "object", + "properties": {} + }, + "output": { + "type": "object", + "properties": { + "done": { + "type": "boolean" + } + }, + "required": [ + "done" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + 
}, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "subscription" + }, + "countedStream": { + "init": { + "type": "object", + "properties": { + "total": { + "type": "number" + } + }, + "required": [ + "total" + ] + }, + "output": { + "type": "object", + "properties": { + "i": { + "type": "number" + } + }, + "required": [ + "i" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + 
"required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "stream", + "input": { + "type": "object", + "properties": {} + } + } + } + } + } +} \ No newline at end of file From 00867d457a1ced395137654dd071c95c668e8226 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 16:21:30 -0800 Subject: [PATCH 06/29] more fixes --- .github/workflows/ci.yaml | 19 +++ .github/workflows/ci.yml | 50 ------ .prettierignore | 4 + python-client/tests/extract_test_schema.mjs | 150 ++++------------- python-client/tests/extract_test_schema.ts | 173 ++++---------------- 5 files changed, 91 insertions(+), 305 deletions(-) delete mode 100644 .github/workflows/ci.yml diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index d9e328a8..41c24098 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -52,3 +52,22 @@ jobs: name: Test Report (${{ matrix.os }}) path: ./test-results.xml reporter: java-junit + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install Python dependencies + working-directory: python-client + run: pip install -e ".[dev]" + + - name: Python lint + working-directory: python-client + run: | + ruff check . + ruff format --check . 
+ + - name: Python tests + working-directory: python-client + run: python -m pytest tests/ -v diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index cef50ef4..00000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Build and Test - -on: - push: - branches: [main] - pull_request: - branches: [main] - -jobs: - build-and-test: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, macos-latest] - steps: - - uses: actions/checkout@v4 - - - name: Setup Node - uses: actions/setup-node@v4 - with: - node-version: 22 - - - name: Install Node dependencies - run: npm ci - - - name: TypeScript check - run: npm run check - - - name: TypeScript tests - run: npm run test:single - - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: '3.12' - - - name: Install Python dependencies - working-directory: python-client - run: pip install -e ".[dev]" - - - name: Python lint - working-directory: python-client - run: | - pip install ruff - ruff check . - ruff format --check . 
- - - name: Python tests - working-directory: python-client - run: python -m pytest tests/ -v diff --git a/.prettierignore b/.prettierignore index 0a0d4376..83de37ed 100644 --- a/.prettierignore +++ b/.prettierignore @@ -3,3 +3,7 @@ node_modules python-client/.venv python-client/.pytest_cache python-client/tests/test_server.mjs +python-client/tests/extract_test_schema.mjs +python-client/tests/test_schema.json +python-client/tests/generated +.codex-review-tmp diff --git a/python-client/tests/extract_test_schema.mjs b/python-client/tests/extract_test_schema.mjs index db334f3b..b2610707 100644 --- a/python-client/tests/extract_test_schema.mjs +++ b/python-client/tests/extract_test_schema.mjs @@ -357,12 +357,6 @@ function Ok(payload) { payload }; } -function Err(error) { - return { - ok: false, - payload: error - }; -} // router/procedures.ts import { Type as Type4 } from "@sinclair/typebox"; @@ -444,15 +438,13 @@ var Procedure = { // python-client/tests/extract_test_schema.ts import { Type as Type5 } from "@sinclair/typebox"; var ServiceSchema = createServiceSchema(); -var count = 0; var TestServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ requestInit: Type5.Object({ n: Type5.Number() }), responseData: Type5.Object({ result: Type5.Number() }), responseError: Type5.Never(), async handler({ reqInit }) { - count += reqInit.n; - return Ok({ result: count }); + return Ok({ result: reqInit.n }); } }), echo: Procedure.stream({ @@ -463,13 +455,7 @@ var TestServiceSchema = ServiceSchema.define({ }), responseData: Type5.Object({ response: Type5.String() }), responseError: Type5.Never(), - async handler({ reqReadable, resWritable }) { - for await (const result of reqReadable) { - if (!result.ok) break; - const val = result.payload; - if (val.ignore) continue; - resWritable.write(Ok({ response: val.msg })); - } + async handler({ resWritable }) { resWritable.close(); } }), @@ -481,25 +467,17 @@ var TestServiceSchema = ServiceSchema.define({ }), responseData: 
Type5.Object({ response: Type5.String() }), responseError: Type5.Never(), - async handler({ reqInit, reqReadable, resWritable }) { - for await (const result of reqReadable) { - if (!result.ok) break; - const val = result.payload; - if (val.ignore) continue; - resWritable.write(Ok({ response: `${reqInit.prefix} ${val.msg}` })); - } + async handler({ resWritable }) { resWritable.close(); } }) }); -var msgs = []; var OrderingServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ requestInit: Type5.Object({ n: Type5.Number() }), responseData: Type5.Object({ n: Type5.Number() }), responseError: Type5.Never(), async handler({ reqInit }) { - msgs.push(reqInit.n); return Ok({ n: reqInit.n }); } }), @@ -507,8 +485,8 @@ var OrderingServiceSchema = ServiceSchema.define({ requestInit: Type5.Object({}), responseData: Type5.Object({ msgs: Type5.Array(Type5.Number()) }), responseError: Type5.Never(), - async handler() { - return Ok({ msgs: [...msgs] }); + async handler(_ctx) { + return Ok({ msgs: [] }); } }) }); @@ -527,14 +505,7 @@ var FallibleServiceSchema = ServiceSchema.define({ }) ]), async handler({ reqInit }) { - if (reqInit.b === 0) { - return Err({ code: "DIV_BY_ZERO", message: "Cannot divide by zero" }); - } - const result = reqInit.a / reqInit.b; - if (!isFinite(result)) { - return Err({ code: "INFINITY", message: "Result is infinity" }); - } - return Ok({ result }); + return Ok({ result: reqInit.a / reqInit.b }); } }), echo: Procedure.stream({ @@ -549,48 +520,27 @@ var FallibleServiceSchema = ServiceSchema.define({ code: Type5.Literal("STREAM_ERROR"), message: Type5.String() }), - async handler({ reqReadable, resWritable }) { - for await (const result of reqReadable) { - if (!result.ok) break; - const val = result.payload; - if (val.throwError) throw new Error("uncaught error"); - if (val.throwResult) { - resWritable.write(Err({ code: "STREAM_ERROR", message: "stream error" })); - continue; - } - resWritable.write(Ok({ response: val.msg })); - } + async handler({ 
resWritable }) { resWritable.close(); } }) }); -var subCount = 0; -var subListeners = /* @__PURE__ */ new Set(); var SubscribableServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ requestInit: Type5.Object({ n: Type5.Number() }), responseData: Type5.Object({ result: Type5.Number() }), responseError: Type5.Never(), async handler({ reqInit }) { - subCount += reqInit.n; - for (const l of subListeners) l(subCount); - return Ok({ result: subCount }); + return Ok({ result: reqInit.n }); } }), value: Procedure.subscription({ requestInit: Type5.Object({}), responseData: Type5.Object({ count: Type5.Number() }), responseError: Type5.Never(), - async handler({ resWritable, ctx }) { - resWritable.write(Ok({ count: subCount })); - const listener = (val) => { - resWritable.write(Ok({ count: val })); - }; - subListeners.add(listener); - ctx.signal.addEventListener("abort", () => { - subListeners.delete(listener); - resWritable.close(); - }); + async handler({ resWritable }) { + resWritable.write(Ok({ count: 0 })); + resWritable.close(); } }) }); @@ -600,13 +550,8 @@ var UploadableServiceSchema = ServiceSchema.define({ requestData: Type5.Object({ n: Type5.Number() }), responseData: Type5.Object({ result: Type5.Number() }), responseError: Type5.Never(), - async handler({ reqReadable }) { - let total = 0; - for await (const result of reqReadable) { - if (!result.ok) break; - total += result.payload.n; - } - return Ok({ result: total }); + async handler(_ctx) { + return Ok({ result: 0 }); } }), addMultipleWithPrefix: Procedure.upload({ @@ -614,13 +559,8 @@ var UploadableServiceSchema = ServiceSchema.define({ requestData: Type5.Object({ n: Type5.Number() }), responseData: Type5.Object({ result: Type5.String() }), responseError: Type5.Never(), - async handler({ reqInit, reqReadable }) { - let total = 0; - for await (const result of reqReadable) { - if (!result.ok) break; - total += result.payload.n; - } - return Ok({ result: `${reqInit.prefix} ${total}` }); + async 
handler(_ctx) { + return Ok({ result: "" }); } }), cancellableAdd: Procedure.upload({ @@ -631,17 +571,8 @@ var UploadableServiceSchema = ServiceSchema.define({ code: Type5.Literal("CANCEL"), message: Type5.String() }), - async handler({ reqReadable, ctx }) { - let total = 0; - for await (const result of reqReadable) { - if (!result.ok) break; - total += result.payload.n; - if (total >= 10) { - ctx.cancel(); - return Err({ code: "CANCEL", message: "total exceeds limit" }); - } - } - return Ok({ result: total }); + async handler(_ctx) { + return Ok({ result: 0 }); } }) }); @@ -650,11 +581,8 @@ var CancellationServiceSchema = ServiceSchema.define({ requestInit: Type5.Object({}), responseData: Type5.Object({}), responseError: Type5.Never(), - async handler({ ctx }) { - return new Promise((_resolve) => { - ctx.signal.addEventListener("abort", () => { - }); - }); + async handler(_ctx) { + return Ok({}); } }), blockingStream: Procedure.stream({ @@ -662,9 +590,8 @@ var CancellationServiceSchema = ServiceSchema.define({ requestData: Type5.Object({}), responseData: Type5.Object({}), responseError: Type5.Never(), - async handler(_ctx) { - return new Promise(() => { - }); + async handler({ resWritable }) { + resWritable.close(); } }), blockingUpload: Procedure.upload({ @@ -673,24 +600,22 @@ var CancellationServiceSchema = ServiceSchema.define({ responseData: Type5.Object({}), responseError: Type5.Never(), async handler(_ctx) { - return new Promise(() => { - }); + return Ok({}); } }), blockingSubscription: Procedure.subscription({ requestInit: Type5.Object({}), responseData: Type5.Object({}), responseError: Type5.Never(), - async handler(_ctx) { - return new Promise(() => { - }); + async handler({ resWritable }) { + resWritable.close(); } }), immediateRpc: Procedure.rpc({ requestInit: Type5.Object({}), responseData: Type5.Object({ done: Type5.Boolean() }), responseError: Type5.Never(), - async handler() { + async handler(_ctx) { return Ok({ done: true }); } }), @@ -699,11 
+624,7 @@ var CancellationServiceSchema = ServiceSchema.define({ requestData: Type5.Object({}), responseData: Type5.Object({ done: Type5.Boolean() }), responseError: Type5.Never(), - async handler({ reqReadable, resWritable }) { - resWritable.write(Ok({ done: true })); - for await (const result of reqReadable) { - if (!result.ok) break; - } + async handler({ resWritable }) { resWritable.close(); } }), @@ -712,10 +633,7 @@ var CancellationServiceSchema = ServiceSchema.define({ requestData: Type5.Object({}), responseData: Type5.Object({ done: Type5.Boolean() }), responseError: Type5.Never(), - async handler({ reqReadable }) { - for await (const result of reqReadable) { - if (!result.ok) break; - } + async handler(_ctx) { return Ok({ done: true }); } }), @@ -724,7 +642,6 @@ var CancellationServiceSchema = ServiceSchema.define({ responseData: Type5.Object({ done: Type5.Boolean() }), responseError: Type5.Never(), async handler({ resWritable }) { - resWritable.write(Ok({ done: true })); resWritable.close(); } }), @@ -733,13 +650,7 @@ var CancellationServiceSchema = ServiceSchema.define({ requestData: Type5.Object({}), responseData: Type5.Object({ i: Type5.Number() }), responseError: Type5.Never(), - async handler({ reqInit, reqReadable, resWritable }) { - for (let i = 0; i < reqInit.total; i++) { - resWritable.write(Ok({ i })); - } - for await (const result of reqReadable) { - if (!result.ok) break; - } + async handler({ resWritable }) { resWritable.close(); } }) @@ -753,6 +664,9 @@ var services = { cancel: CancellationServiceSchema }; var schema = serializeSchema(services); -var outPath = path.join(path.dirname(new URL(import.meta.url).pathname), "test_schema.json"); +var outPath = path.join( + path.dirname(new URL(import.meta.url).pathname), + "test_schema.json" +); fs.writeFileSync(outPath, JSON.stringify(schema, null, 2)); console.log(`Wrote schema to ${outPath}`); diff --git a/python-client/tests/extract_test_schema.ts b/python-client/tests/extract_test_schema.ts 
index e36fef81..03b1ae30 100644 --- a/python-client/tests/extract_test_schema.ts +++ b/python-client/tests/extract_test_schema.ts @@ -1,10 +1,11 @@ /** * Extract the test server schema to a JSON file for codegen tests. * - * Usage (from river repo root): - * node python-client/tests/extract_test_schema.mjs + * Defines the same service schemas as test_server.ts but with stub + * handlers — only the type shapes matter for serialization. * - * Outputs: python-client/tests/test_schema.json + * Usage (from river repo root, after esbuild bundle): + * node python-client/tests/extract_test_schema.mjs */ import fs from 'node:fs'; import path from 'node:path'; @@ -12,26 +13,19 @@ import { createServiceSchema, Procedure, Ok, - Err, serializeSchema, } from '../../router'; import { Type } from '@sinclair/typebox'; const ServiceSchema = createServiceSchema(); -// ------------------------------------------------------------------- -// TestService -// ------------------------------------------------------------------- -let count = 0; - const TestServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ requestInit: Type.Object({ n: Type.Number() }), responseData: Type.Object({ result: Type.Number() }), responseError: Type.Never(), async handler({ reqInit }) { - count += reqInit.n; - return Ok({ result: count }); + return Ok({ result: reqInit.n }); }, }), echo: Procedure.stream({ @@ -42,13 +36,7 @@ const TestServiceSchema = ServiceSchema.define({ }), responseData: Type.Object({ response: Type.String() }), responseError: Type.Never(), - async handler({ reqReadable, resWritable }) { - for await (const result of reqReadable) { - if (!result.ok) break; - const val = result.payload; - if (val.ignore) continue; - resWritable.write(Ok({ response: val.msg })); - } + async handler({ resWritable }) { resWritable.close(); }, }), @@ -60,30 +48,18 @@ const TestServiceSchema = ServiceSchema.define({ }), responseData: Type.Object({ response: Type.String() }), responseError: Type.Never(), - 
async handler({ reqInit, reqReadable, resWritable }) { - for await (const result of reqReadable) { - if (!result.ok) break; - const val = result.payload; - if (val.ignore) continue; - resWritable.write(Ok({ response: `${reqInit.prefix} ${val.msg}` })); - } + async handler({ resWritable }) { resWritable.close(); }, }), }); -// ------------------------------------------------------------------- -// OrderingService -// ------------------------------------------------------------------- -const msgs: Array = []; - const OrderingServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ requestInit: Type.Object({ n: Type.Number() }), responseData: Type.Object({ n: Type.Number() }), responseError: Type.Never(), async handler({ reqInit }) { - msgs.push(reqInit.n); return Ok({ n: reqInit.n }); }, }), @@ -91,15 +67,12 @@ const OrderingServiceSchema = ServiceSchema.define({ requestInit: Type.Object({}), responseData: Type.Object({ msgs: Type.Array(Type.Number()) }), responseError: Type.Never(), - async handler() { - return Ok({ msgs: [...msgs] }); + async handler(_ctx) { + return Ok({ msgs: [] as Array }); }, }), }); -// ------------------------------------------------------------------- -// FallibleService -// ------------------------------------------------------------------- const FallibleServiceSchema = ServiceSchema.define({ divide: Procedure.rpc({ requestInit: Type.Object({ a: Type.Number(), b: Type.Number() }), @@ -115,14 +88,7 @@ const FallibleServiceSchema = ServiceSchema.define({ }), ]), async handler({ reqInit }) { - if (reqInit.b === 0) { - return Err({ code: 'DIV_BY_ZERO' as const, message: 'Cannot divide by zero' }); - } - const result = reqInit.a / reqInit.b; - if (!isFinite(result)) { - return Err({ code: 'INFINITY' as const, message: 'Result is infinity' }); - } - return Ok({ result }); + return Ok({ result: reqInit.a / reqInit.b }); }, }), echo: Procedure.stream({ @@ -137,74 +103,40 @@ const FallibleServiceSchema = ServiceSchema.define({ code: 
Type.Literal('STREAM_ERROR'), message: Type.String(), }), - async handler({ reqReadable, resWritable }) { - for await (const result of reqReadable) { - if (!result.ok) break; - const val = result.payload; - if (val.throwError) throw new Error('uncaught error'); - if (val.throwResult) { - resWritable.write(Err({ code: 'STREAM_ERROR' as const, message: 'stream error' })); - continue; - } - resWritable.write(Ok({ response: val.msg })); - } + async handler({ resWritable }) { resWritable.close(); }, }), }); -// ------------------------------------------------------------------- -// SubscribableService -// ------------------------------------------------------------------- -let subCount = 0; -type SubListener = (val: number) => void; -const subListeners = new Set(); - const SubscribableServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ requestInit: Type.Object({ n: Type.Number() }), responseData: Type.Object({ result: Type.Number() }), responseError: Type.Never(), async handler({ reqInit }) { - subCount += reqInit.n; - for (const l of subListeners) l(subCount); - return Ok({ result: subCount }); + return Ok({ result: reqInit.n }); }, }), value: Procedure.subscription({ requestInit: Type.Object({}), responseData: Type.Object({ count: Type.Number() }), responseError: Type.Never(), - async handler({ resWritable, ctx }) { - resWritable.write(Ok({ count: subCount })); - const listener: SubListener = (val) => { - resWritable.write(Ok({ count: val })); - }; - subListeners.add(listener); - ctx.signal.addEventListener('abort', () => { - subListeners.delete(listener); - resWritable.close(); - }); + async handler({ resWritable }) { + resWritable.write(Ok({ count: 0 })); + resWritable.close(); }, }), }); -// ------------------------------------------------------------------- -// UploadableService -// ------------------------------------------------------------------- const UploadableServiceSchema = ServiceSchema.define({ addMultiple: Procedure.upload({ requestInit: 
Type.Object({}), requestData: Type.Object({ n: Type.Number() }), responseData: Type.Object({ result: Type.Number() }), responseError: Type.Never(), - async handler({ reqReadable }) { - let total = 0; - for await (const result of reqReadable) { - if (!result.ok) break; - total += result.payload.n; - } - return Ok({ result: total }); + async handler(_ctx) { + return Ok({ result: 0 }); }, }), addMultipleWithPrefix: Procedure.upload({ @@ -212,13 +144,8 @@ const UploadableServiceSchema = ServiceSchema.define({ requestData: Type.Object({ n: Type.Number() }), responseData: Type.Object({ result: Type.String() }), responseError: Type.Never(), - async handler({ reqInit, reqReadable }) { - let total = 0; - for await (const result of reqReadable) { - if (!result.ok) break; - total += result.payload.n; - } - return Ok({ result: `${reqInit.prefix} ${total}` }); + async handler(_ctx) { + return Ok({ result: '' }); }, }), cancellableAdd: Procedure.upload({ @@ -229,33 +156,19 @@ const UploadableServiceSchema = ServiceSchema.define({ code: Type.Literal('CANCEL'), message: Type.String(), }), - async handler({ reqReadable, ctx }) { - let total = 0; - for await (const result of reqReadable) { - if (!result.ok) break; - total += result.payload.n; - if (total >= 10) { - ctx.cancel(); - return Err({ code: 'CANCEL' as const, message: 'total exceeds limit' }); - } - } - return Ok({ result: total }); + async handler(_ctx) { + return Ok({ result: 0 }); }, }), }); -// ------------------------------------------------------------------- -// CancellationService -// ------------------------------------------------------------------- const CancellationServiceSchema = ServiceSchema.define({ blockingRpc: Procedure.rpc({ requestInit: Type.Object({}), responseData: Type.Object({}), responseError: Type.Never(), - async handler({ ctx }) { - return new Promise((_resolve) => { - ctx.signal.addEventListener('abort', () => {}); - }); + async handler(_ctx) { + return Ok({}); }, }), blockingStream: 
Procedure.stream({ @@ -263,8 +176,8 @@ const CancellationServiceSchema = ServiceSchema.define({ requestData: Type.Object({}), responseData: Type.Object({}), responseError: Type.Never(), - async handler(_ctx) { - return new Promise(() => {}); + async handler({ resWritable }) { + resWritable.close(); }, }), blockingUpload: Procedure.upload({ @@ -273,22 +186,22 @@ const CancellationServiceSchema = ServiceSchema.define({ responseData: Type.Object({}), responseError: Type.Never(), async handler(_ctx) { - return new Promise(() => {}); + return Ok({}); }, }), blockingSubscription: Procedure.subscription({ requestInit: Type.Object({}), responseData: Type.Object({}), responseError: Type.Never(), - async handler(_ctx) { - return new Promise(() => {}); + async handler({ resWritable }) { + resWritable.close(); }, }), immediateRpc: Procedure.rpc({ requestInit: Type.Object({}), responseData: Type.Object({ done: Type.Boolean() }), responseError: Type.Never(), - async handler() { + async handler(_ctx) { return Ok({ done: true }); }, }), @@ -297,11 +210,7 @@ const CancellationServiceSchema = ServiceSchema.define({ requestData: Type.Object({}), responseData: Type.Object({ done: Type.Boolean() }), responseError: Type.Never(), - async handler({ reqReadable, resWritable }) { - resWritable.write(Ok({ done: true })); - for await (const result of reqReadable) { - if (!result.ok) break; - } + async handler({ resWritable }) { resWritable.close(); }, }), @@ -310,10 +219,7 @@ const CancellationServiceSchema = ServiceSchema.define({ requestData: Type.Object({}), responseData: Type.Object({ done: Type.Boolean() }), responseError: Type.Never(), - async handler({ reqReadable }) { - for await (const result of reqReadable) { - if (!result.ok) break; - } + async handler(_ctx) { return Ok({ done: true }); }, }), @@ -322,7 +228,6 @@ const CancellationServiceSchema = ServiceSchema.define({ responseData: Type.Object({ done: Type.Boolean() }), responseError: Type.Never(), async handler({ resWritable }) { 
- resWritable.write(Ok({ done: true })); resWritable.close(); }, }), @@ -331,21 +236,12 @@ const CancellationServiceSchema = ServiceSchema.define({ requestData: Type.Object({}), responseData: Type.Object({ i: Type.Number() }), responseError: Type.Never(), - async handler({ reqInit, reqReadable, resWritable }) { - for (let i = 0; i < reqInit.total; i++) { - resWritable.write(Ok({ i })); - } - for await (const result of reqReadable) { - if (!result.ok) break; - } + async handler({ resWritable }) { resWritable.close(); }, }), }); -// ------------------------------------------------------------------- -// Serialize and write -// ------------------------------------------------------------------- const services = { test: TestServiceSchema, ordering: OrderingServiceSchema, @@ -356,6 +252,9 @@ const services = { }; const schema = serializeSchema(services); -const outPath = path.join(path.dirname(new URL(import.meta.url).pathname), 'test_schema.json'); +const outPath = path.join( + path.dirname(new URL(import.meta.url).pathname), + 'test_schema.json', +); fs.writeFileSync(outPath, JSON.stringify(schema, null, 2)); console.log(`Wrote schema to ${outPath}`); From c95ca443430675ee7bff51e9cd3ab61c886b9aa6 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 17:07:06 -0800 Subject: [PATCH 07/29] fixes --- python-client/.coverage | Bin 0 -> 53248 bytes python-client/pyproject.toml | 1 + python-client/river/client.py | 31 +- python-client/river/transport.py | 7 +- python-client/tests/conftest.py | 113 +- python-client/tests/extract_test_schema.mjs | 8 + python-client/tests/extract_test_schema.ts | 8 + python-client/tests/generated/_types.py | 9 + python-client/tests/generated/test_client.py | 14 + python-client/tests/test_e2e.py | 133 +- python-client/tests/test_equivalence.py | 640 +++ python-client/tests/test_handshake.py | 140 + python-client/tests/test_schema.json | 127 + python-client/tests/test_server.ts | 14 +- python-client/tests/test_server_handshake.mjs | 
4534 +++++++++++++++++ python-client/tests/test_server_handshake.ts | 83 + python-client/tests/test_session.py | 227 + 17 files changed, 6041 insertions(+), 48 deletions(-) create mode 100644 python-client/.coverage create mode 100644 python-client/tests/test_equivalence.py create mode 100644 python-client/tests/test_handshake.py create mode 100644 python-client/tests/test_server_handshake.mjs create mode 100644 python-client/tests/test_server_handshake.ts create mode 100644 python-client/tests/test_session.py diff --git a/python-client/.coverage b/python-client/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..fafba2ef5d198605b691eaf9fdf502241e1aba0e GIT binary patch literal 53248 zcmeI4Yiu0V702)F>}%HE9jBIATq}4`n>ud%h)LBJk`g=Mn1~2Mn?!_`ZpQ1eJ!W^t znVC&&D%)8nBM%eT?z!{W zwG$g*r;$bYkGy+l?!D*SbAI>Sd-v{m=X(!r)g4XgHLbkrDC<}i69m?-D2y?QUP*cd zM~qIw!3Cwltn-CVCD!tj$)voKRYo6X@(+_=mYXa4D*st=xboZ5jtV{2B(+ch4iEqV z5CDOjhCuP5%2>L#Ru~^~)b5<-SZY?Yy!Yfi-|g(!-l=Tw*tE4%@$!__35Aa4W~D>1 z%zetBW+}aTPE&NFN6)H`ZuBWmzZTRk*jf)?vnY>NEncz3mFH&}y@#Y6t&da;T6$i! 
zhLy*(;U>3&-^=g)8WHsBfVzu7Wm}opOmWZXRtA!^u z*C*A=(qF|KZz!Ye%9dVIlr^m$Zhg0<8rgo$Zc@~ehgp;C9CAEwEi>Qhm`YAJTsI9{ zcXZQGv>`29aI~Iz9c<#zf`j1A_J(xr<__)lB>lE#(J2ehBk5Q7g?q_3!wj&N1$omX z?AmM;^4)}dppYNrokJ2{!;ZO-C&d;>bTjjwO@mRXPUU+hk z8z>x4rdxOH(up)DXiC`&gbpuXIBxhTnHwa06yCTzmb7Reo7+a$OLUTb-t+!xg`B1UB4_R}sW~qHzi(l$Z3(Fly z6gq8EG~HRhQ0SCGAk$VlO%!XRv2K1axua{}_PvXT`IF_!d5sKc3;PVoHMdmocbIL>Ds)gXQ zn?I;J{mQkr^K3G_AreimuUW`&ZbGJwzef*NgUki-_7+SF~uwc*-TX=7Q3J&w0$6v4bGVE!kxt|YC-i|kkkf72)gVLrUdX>&Vsb9)xtU!OuATi-u z^J`{$*%RWL;HQ z4zdSnmZpzLvxAERpT&@0dM)r7(d+hZ4_#2p|A3TeY4 zL*hy~__vU?Rf1So>;DhHw_4iPwR|H%UWMdyOg<-H_*^;&qd@=!KmY_l00ck)1V8`; zKmY_l00c?|D#evT@HIdvA=XL$w*dV9e?{`EOg=6b<&M-$>W$QoQ=Q4F9mn*9UON0 zO`|!R(>25K^Ri|SJ%h$*&1CYbZe%j_i}ub0DUz3}$ook*so5-%niWe`qvdtS(Ja!_ z6~8{ebVGMszwVBaB4M!x*&02>z%?$DD6^4$ClA(4$O)&MWiNP43{t&58l&tytn$M64FBv&!{b$LAX zX7azuRmqo=MryOXNoJ`hzEHy)&Vm34fB*=900@8p2!H?xfB<9ng^SgT-rml$|F75_ zF3L+*RNDV1y28a3OIB0f|4Y~DS7{R&-xw|mi#Djd|Br1DM;0v}?Ej-n5f1kMk&bXN zviPQF?f=6|^S!kH7r!HpE!u$6{y#*mi_&%M|MC1kOmH9o0w4eaAOHd&00JNY0w4ea zAaGL?5CxGX`Tc)Eo?`Te0|Y<-1V8`;KmY_l00ck)1V8`;K;R}MAc{$e-~Ugfo@Vl8 z`DOWt{BwC!-X`Ccx{`W{GI4+a2!H?xfB*=900@8p2!H?xfWZHoz^X)8khVrAvWbf) zPMn!OI`i7anPc^5&wlOpgSXt)appnk;9s8WVyP`sM39PK3Xi`hoPO{8Q<16jA5VNd z)K_)p&1oqPYqzlQ$!gWVT@ed|jTUyt3X9FU@nMfV>Po}Z}MDehu_D*o(bXj`rD z%bK75=Fhjssl|K6fn85zj$YcaW83i~t;eQ6I6D$;xpXLhwDN=BPk(c5jH-0UHXRpE ze>i$a{PN)wcCuyac;p`g-**nKIr0qK|E&1pbbS587vB2&g%?I|d2OaCN)nGGLMx9= zz5B}8NAJGQ^ylu6d? 
z)E;L08EdCU@<+o|l8Qf4EmFEVGNENpy{-LX*JG#O_{!lQb%Z#CP>3M38-~#$GFEl$ zQ8xN3HuV4-oxV%BT*Ll7dO~DtU-@Ku^1uTVm-nYnK0Y})F|+QSmCuebwmTLT!je?= zNr>P7Pspz_`HK7>`7QZP`Mmt9{I+~neqTOCML0kJ1V8`;KmY_l00ck)1V8`;KmY`m znE?N)L5L>YBrLg!7 dict[str, Any]: + writable = result["req_writable"] + if writable.is_writable(): + writable.close() readable = result["res_readable"] done, value = await readable.next() if done: @@ -271,6 +274,7 @@ def _handle_proc( # Tracking state clean_close = True cleaned_up = False + abort_task: asyncio.Task | None = None def cleanup(): nonlocal cleaned_up @@ -279,15 +283,21 @@ def cleanup(): cleaned_up = True transport.remove_event_listener("message", on_message) transport.remove_event_listener("sessionStatus", on_session_status) + if abort_task is not None and not abort_task.done(): + abort_task.cancel() + + def _try_cleanup(): + """Run cleanup once both sides have been closed/triggered.""" + if res_readable._closed and req_writable.is_closed(): + cleanup() def close_readable(): - if not res_readable.is_closed(): + if not res_readable._closed: try: res_readable._trigger_close() except RuntimeError: pass - if req_writable.is_closed(): - cleanup() + _try_cleanup() # Create writable for requests def write_cb(raw_value: Any) -> None: @@ -309,8 +319,7 @@ def close_cb() -> None: send_fn(close_stream_message(stream_id)) except RuntimeError: pass - if res_readable.is_closed(): - cleanup() + _try_cleanup() req_writable: Writable = Writable(write_cb=write_cb, close_cb=close_cb) @@ -336,7 +345,7 @@ def on_message(msg: TransportMessage) -> None: res_readable._push_value(err_result(code, str(payload))) close_readable() if req_writable.is_writable(): - req_writable._closed = True + req_writable.close() return if res_readable.is_closed(): @@ -374,10 +383,12 @@ def on_session_status(evt: dict) -> None: pass close_readable() if req_writable.is_writable(): - req_writable._closed = True + req_writable.close() def on_client_cancel() -> None: nonlocal clean_close + if cleaned_up: 
+ return clean_close = False try: res_readable._push_value(err_result(CANCEL_CODE, "cancelled by client")) @@ -385,7 +396,7 @@ def on_client_cancel() -> None: pass close_readable() if req_writable.is_writable(): - req_writable._closed = True + req_writable.close() try: send_fn( cancel_message( @@ -402,14 +413,14 @@ def on_client_cancel() -> None: # Wire up abort signal if abort_signal is not None: - # Use asyncio task to watch the event + async def _watch_abort(): await abort_signal.wait() on_client_cancel() try: loop = asyncio.get_event_loop() - loop.create_task(_watch_abort()) + abort_task = loop.create_task(_watch_abort()) except RuntimeError: pass diff --git a/python-client/river/transport.py b/python-client/river/transport.py index 92655be7..973153b3 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -129,8 +129,6 @@ def __init__( codec: Codec | None = None, options: SessionOptions | None = None, handshake_metadata: Any = None, - connect_on_invoke: bool = True, - eagerly_connect: bool = False, ) -> None: self.client_id = client_id or generate_id() self.server_id = server_id or "SERVER" @@ -139,7 +137,6 @@ def __init__( self._codec_adapter = CodecMessageAdapter(self._codec) self.options = options or DEFAULT_SESSION_OPTIONS self._handshake_metadata = handshake_metadata - self._connect_on_invoke = connect_on_invoke # State self._status: str = "open" # 'open' | 'closed' @@ -333,6 +330,7 @@ async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: self._delete_session(to) self._try_reconnecting(to) else: + # Fatal handshake error — do not retry self._events.dispatch( "protocolError", { @@ -341,7 +339,7 @@ async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: "code": code, }, ) - self._on_connection_failed(to) + session.state = SessionState.NO_CONNECTION return # Check session ID match @@ -353,6 +351,7 @@ async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: session.id, 
resp_session_id, ) + await ws.close() # The server lost our session state; destroy old and create new self._delete_session(to, emit_closing=True) self._try_reconnecting(to) diff --git a/python-client/tests/conftest.py b/python-client/tests/conftest.py index b8d99cdd..31b908f4 100644 --- a/python-client/tests/conftest.py +++ b/python-client/tests/conftest.py @@ -1,12 +1,11 @@ """Pytest fixtures for River Python client tests. -Manages the lifecycle of a TypeScript test server process that the +Manages the lifecycle of TypeScript test server processes that the Python client connects to. """ from __future__ import annotations -import asyncio import os import re import signal @@ -17,9 +16,13 @@ import pytest +from river.codec import BinaryCodec, Codec, NaiveJsonCodec + TESTS_DIR = os.path.dirname(__file__) SERVER_TS = os.path.join(TESTS_DIR, "test_server.ts") SERVER_MJS = os.path.join(TESTS_DIR, "test_server.mjs") +HANDSHAKE_SERVER_TS = os.path.join(TESTS_DIR, "test_server_handshake.ts") +HANDSHAKE_SERVER_MJS = os.path.join(TESTS_DIR, "test_server_handshake.mjs") EXTRACT_SCHEMA_TS = os.path.join(TESTS_DIR, "extract_test_schema.ts") EXTRACT_SCHEMA_MJS = os.path.join(TESTS_DIR, "extract_test_schema.mjs") SCHEMA_JSON = os.path.join(TESTS_DIR, "test_schema.json") @@ -42,6 +45,7 @@ def _esbuild_bundle(ts_path: str, mjs_path: str) -> None: # we reuse whatever is already in node_modules "--external:ws", "--external:@sinclair/typebox", + "--external:@msgpack/msgpack", ], cwd=RIVER_ROOT, capture_output=True, @@ -56,6 +60,11 @@ def _build_test_server() -> None: _esbuild_bundle(SERVER_TS, SERVER_MJS) +def _build_handshake_server() -> None: + """Bundle test_server_handshake.ts -> test_server_handshake.mjs using esbuild.""" + _esbuild_bundle(HANDSHAKE_SERVER_TS, HANDSHAKE_SERVER_MJS) + + def _extract_test_schema() -> None: """Bundle and run extract_test_schema.ts to produce test_schema.json, then run codegen to produce the generated client module.""" @@ -92,38 +101,21 @@ def 
_extract_test_schema() -> None: ) -@pytest.fixture(scope="session") -def generated_client_dir() -> str: - """Extract test schema and run codegen. Returns the generated dir path.""" - _extract_test_schema() - return GENERATED_DIR - - -@pytest.fixture(scope="session") -def event_loop(): - """Create an event loop for the entire test session.""" - loop = asyncio.new_event_loop() - yield loop - loop.close() - - -@pytest.fixture(scope="session") -def river_server_port() -> Generator[int, None, None]: - """Build and start the TypeScript test server, yield its port. - - The server is built once via esbuild and kept alive for the entire - test session. - """ - _build_test_server() - +def _start_server( + mjs_path: str, + label: str, + env: dict[str, str] | None = None, +) -> tuple[subprocess.Popen, int]: + """Start a Node.js server process and return (proc, port).""" + full_env = {**os.environ, **(env or {})} proc = subprocess.Popen( - ["node", SERVER_MJS], + ["node", mjs_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=RIVER_ROOT, + env=full_env, ) - # Wait for the server to print the port port = None deadline = time.monotonic() + 30 assert proc.stdout is not None @@ -133,7 +125,7 @@ def river_server_port() -> Generator[int, None, None]: if proc.poll() is not None: stderr = proc.stderr.read().decode("utf-8") if proc.stderr else "" raise RuntimeError( - f"Test server exited with code {proc.returncode}.\nstderr: {stderr}" + f"{label} exited with code {proc.returncode}.\nstderr: {stderr}" ) time.sleep(0.1) continue @@ -144,10 +136,12 @@ def river_server_port() -> Generator[int, None, None]: if port is None: proc.kill() - raise RuntimeError("Failed to get port from test server within 30s") + raise RuntimeError(f"Failed to get port from {label} within 30s") - yield port + return proc, port + +def _stop_server(proc: subprocess.Popen) -> None: proc.send_signal(signal.SIGTERM) try: proc.wait(timeout=5) @@ -155,7 +149,64 @@ def river_server_port() -> Generator[int, None, 
None]: proc.kill() +@pytest.fixture(scope="session") +def generated_client_dir() -> str: + """Extract test schema and run codegen. Returns the generated dir path.""" + _extract_test_schema() + return GENERATED_DIR + + +@pytest.fixture(scope="session") +def river_server_port() -> Generator[int, None, None]: + """Build and start the TypeScript test server (JSON codec), yield its port.""" + _build_test_server() + proc, port = _start_server(SERVER_MJS, "Test server") + yield port + _stop_server(proc) + + +@pytest.fixture(scope="session") +def river_binary_server_port() -> Generator[int, None, None]: + """Build and start the TypeScript test server (binary codec), yield its port.""" + _build_test_server() + proc, port = _start_server( + SERVER_MJS, "Binary test server", env={"RIVER_CODEC": "binary"} + ) + yield port + _stop_server(proc) + + @pytest.fixture def server_url(river_server_port: int) -> str: """Return the WebSocket URL for the test server.""" return f"ws://127.0.0.1:{river_server_port}" + + +@pytest.fixture(scope="session") +def river_handshake_server_port() -> Generator[int, None, None]: + """Build and start the handshake test server, yield its port.""" + _build_handshake_server() + proc, port = _start_server(HANDSHAKE_SERVER_MJS, "Handshake test server") + yield port + _stop_server(proc) + + +@pytest.fixture +def handshake_server_url(river_handshake_server_port: int) -> str: + """Return the WebSocket URL for the handshake test server.""" + return f"ws://127.0.0.1:{river_handshake_server_port}" + + +@pytest.fixture(params=["json", "binary"]) +def codec_and_url( + request: pytest.FixtureRequest, + river_server_port: int, + river_binary_server_port: int, +) -> tuple[Codec, str]: + """Parametrized fixture returning (codec, server_url) pairs. + + Each codec is paired with a server that speaks the same protocol. 
+ """ + if request.param == "json": + return NaiveJsonCodec(), f"ws://127.0.0.1:{river_server_port}" + return BinaryCodec(), f"ws://127.0.0.1:{river_binary_server_port}" diff --git a/python-client/tests/extract_test_schema.mjs b/python-client/tests/extract_test_schema.mjs index b2610707..a4e8fe05 100644 --- a/python-client/tests/extract_test_schema.mjs +++ b/python-client/tests/extract_test_schema.mjs @@ -470,6 +470,14 @@ var TestServiceSchema = ServiceSchema.define({ async handler({ resWritable }) { resWritable.close(); } + }), + echoBinary: Procedure.rpc({ + requestInit: Type5.Object({ data: Type5.Uint8Array() }), + responseData: Type5.Object({ data: Type5.Uint8Array(), length: Type5.Number() }), + responseError: Type5.Never(), + async handler({ reqInit }) { + return Ok({ data: reqInit.data, length: reqInit.data.length }); + } }) }); var OrderingServiceSchema = ServiceSchema.define({ diff --git a/python-client/tests/extract_test_schema.ts b/python-client/tests/extract_test_schema.ts index 03b1ae30..48aaa506 100644 --- a/python-client/tests/extract_test_schema.ts +++ b/python-client/tests/extract_test_schema.ts @@ -52,6 +52,14 @@ const TestServiceSchema = ServiceSchema.define({ resWritable.close(); }, }), + echoBinary: Procedure.rpc({ + requestInit: Type.Object({ data: Type.Uint8Array() }), + responseData: Type.Object({ data: Type.Uint8Array(), length: Type.Number() }), + responseError: Type.Never(), + async handler({ reqInit }) { + return Ok({ data: reqInit.data, length: reqInit.data.length }); + }, + }), }); const OrderingServiceSchema = ServiceSchema.define({ diff --git a/python-client/tests/generated/_types.py b/python-client/tests/generated/_types.py index cac1af4f..fbf15c29 100644 --- a/python-client/tests/generated/_types.py +++ b/python-client/tests/generated/_types.py @@ -41,6 +41,15 @@ class TestEchoWithPrefixOutput(TypedDict): response: str +class TestEchoBinaryInit(TypedDict): + data: bytes + + +class TestEchoBinaryOutput(TypedDict): + data: bytes + 
length: float + + class OrderingAddInit(TypedDict): n: float diff --git a/python-client/tests/generated/test_client.py b/python-client/tests/generated/test_client.py index 57482d0b..ed29ece8 100644 --- a/python-client/tests/generated/test_client.py +++ b/python-client/tests/generated/test_client.py @@ -10,6 +10,7 @@ from ._types import ( TestAddInit, + TestEchoBinaryInit, TestEchoInit, TestEchoInput, TestEchoWithPrefixInit, @@ -97,3 +98,16 @@ def echo_with_prefix( abort_signal=abort_signal, ) return TestEchoWithPrefixStreamResult(result) + + async def echo_binary( + self, + init: TestEchoBinaryInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> dict[str, Any]: + return await self._client.rpc( + "test", + "echoBinary", + init, + abort_signal=abort_signal, + ) diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index 38ef834a..917b7b9d 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -24,10 +24,13 @@ async def make_client(server_url: str, **kwargs) -> RiverClient: client_id=None, # auto-generate server_id="SERVER", codec=NaiveJsonCodec(), + ) + return RiverClient( + transport, + server_id="SERVER", connect_on_invoke=kwargs.get("connect_on_invoke", True), eagerly_connect=kwargs.get("eagerly_connect", False), ) - return RiverClient(transport, server_id="SERVER") async def cleanup_client(client: RiverClient) -> None: @@ -572,7 +575,6 @@ async def test_eagerly_connect(self, server_url: str): ws_url=server_url, server_id="SERVER", codec=NaiveJsonCodec(), - eagerly_connect=True, ) client = RiverClient(transport, server_id="SERVER", eagerly_connect=True) try: @@ -1298,3 +1300,130 @@ def test_codec_adapter_invalid_buffer(self): ok, result = adapter.from_buffer(b"not valid json") assert ok is False assert isinstance(result, str) + + +# ===================================================================== +# Lifecycle / Cleanup Tests +# 
===================================================================== + + +class TestListenerCleanup: + """Verify that event listeners are cleaned up after cancel/disconnect.""" + + @pytest.mark.asyncio + async def test_cancel_cleans_up_listeners(self, server_url: str): + """Cancelling a stream removes transport event listeners.""" + transport = WebSocketClientTransport( + ws_url=server_url, + server_id="SERVER", + codec=NaiveJsonCodec(), + ) + client = RiverClient(transport, server_id="SERVER") + try: + before_msg = transport._events.listener_count("message") + before_ss = transport._events.listener_count("sessionStatus") + + abort = asyncio.Event() + stream = client.stream("cancel", "blockingStream", {}, abort_signal=abort) + # Each stream registers +1 message and +1 sessionStatus listener + assert transport._events.listener_count("message") == before_msg + 1 + + abort.set() + await stream.res_readable.next() # consume CANCEL error + + # Give the event loop a tick to run cleanup + await asyncio.sleep(0.05) + + assert transport._events.listener_count("message") == before_msg + assert transport._events.listener_count("sessionStatus") == before_ss + finally: + await transport.close() + + @pytest.mark.asyncio + async def test_repeated_cancels_do_not_leak(self, server_url: str): + """Many cancelled streams don't accumulate stale listeners.""" + transport = WebSocketClientTransport( + ws_url=server_url, + server_id="SERVER", + codec=NaiveJsonCodec(), + ) + client = RiverClient(transport, server_id="SERVER") + try: + before = transport._events.listener_count("message") + + for _ in range(20): + abort = asyncio.Event() + stream = client.stream( + "cancel", "blockingStream", {}, abort_signal=abort + ) + abort.set() + await stream.res_readable.next() + await asyncio.sleep(0.01) + + assert transport._events.listener_count("message") == before + finally: + await transport.close() + + @pytest.mark.asyncio + async def test_abort_task_cancelled_on_normal_close(self, server_url: 
str): + """Abort watcher task is cancelled when stream completes normally.""" + transport = WebSocketClientTransport( + ws_url=server_url, + server_id="SERVER", + codec=NaiveJsonCodec(), + ) + client = RiverClient(transport, server_id="SERVER") + try: + # Create a stream with an abort signal that is never set + abort = asyncio.Event() + stream = client.stream("test", "echo", {}, abort_signal=abort) + + stream.req_writable.write({"msg": "hi", "ignore": False}) + done, msg = await stream.res_readable.next() + assert not done and msg["ok"] is True + + stream.req_writable.close() + # Wait for server to close the stream + done2, _ = await stream.res_readable.next() + assert done2 + + await asyncio.sleep(0.05) + + # Setting the signal now should be harmless (no stale cancel) + abort.set() + await asyncio.sleep(0.05) + finally: + await transport.close() + + +class TestUploadFinalize: + """Verify upload finalize closes the request stream.""" + + @pytest.mark.asyncio + async def test_finalize_without_explicit_close(self, server_url: str): + """finalize() closes req_writable if caller didn't.""" + client = await make_client(server_url) + try: + upload = client.upload("uploadable", "addMultiple", {}) + upload.req_writable.write({"n": 5}) + upload.req_writable.write({"n": 3}) + # Don't call upload.req_writable.close() — finalize should do it + result = await asyncio.wait_for(upload.finalize(), timeout=5.0) + assert result["ok"] is True + assert result["payload"]["result"] == 8 + finally: + await cleanup_client(client) + + @pytest.mark.asyncio + async def test_finalize_after_explicit_close(self, server_url: str): + """finalize() works when req_writable was already closed.""" + client = await make_client(server_url) + try: + upload = client.upload("uploadable", "addMultiple", {}) + upload.req_writable.write({"n": 2}) + upload.req_writable.close() + result = await asyncio.wait_for(upload.finalize(), timeout=5.0) + assert result["ok"] is True + assert result["payload"]["result"] 
== 2 + finally: + await cleanup_client(client) diff --git a/python-client/tests/test_equivalence.py b/python-client/tests/test_equivalence.py new file mode 100644 index 00000000..55607a54 --- /dev/null +++ b/python-client/tests/test_equivalence.py @@ -0,0 +1,640 @@ +"""Cross-codec parametrized equivalence tests. + +Every test in this module runs against both NaiveJsonCodec and BinaryCodec, +proving that both codecs produce identical behavior against the TS server. +Each codec is paired with a matching server (JSON or binary). +""" + +from __future__ import annotations + +import asyncio + +import pytest + +from river.client import RiverClient +from river.codec import Codec +from river.session import SessionOptions +from river.transport import WebSocketClientTransport + +# -- helpers -- + + +async def make_client( + url: str, codec: Codec, options: SessionOptions | None = None +) -> RiverClient: + transport = WebSocketClientTransport( + ws_url=url, + client_id=None, + server_id="SERVER", + codec=codec, + options=options, + ) + return RiverClient( + transport, + server_id="SERVER", + connect_on_invoke=True, + eagerly_connect=False, + ) + + +async def cleanup(client: RiverClient) -> None: + await client.transport.close() + + +# ===================================================================== +# RPC Equivalence +# ===================================================================== + + +class TestRpcEquivalence: + @pytest.mark.asyncio + async def test_basic_rpc(self, codec_and_url: tuple[Codec, str]): + codec, url = codec_and_url + client = await make_client(url, codec) + try: + result = await client.rpc("test", "add", {"n": 3}) + assert result["ok"] is True + # test.add uses a global accumulator, so just verify it returns a number + assert isinstance(result["payload"]["result"], (int, float)) + finally: + await cleanup(client) + + @pytest.mark.asyncio + async def test_fallible_rpc_success(self, codec_and_url: tuple[Codec, str]): + codec, url = codec_and_url + 
class TestRpcEquivalence:
    @pytest.mark.asyncio
    async def test_basic_rpc(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            res = await client.rpc("test", "add", {"n": 3})
            assert res["ok"] is True
            # test.add accumulates into server-global state, so the exact
            # value is run-dependent; only its numeric type is stable.
            assert isinstance(res["payload"]["result"], (int, float))
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_fallible_rpc_success(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            res = await client.rpc("fallible", "divide", {"a": 10, "b": 2})
            assert res["ok"] is True
            assert res["payload"]["result"] == 5.0
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_fallible_rpc_div_by_zero(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            res = await client.rpc("fallible", "divide", {"a": 10, "b": 0})
            assert res["ok"] is False
            assert res["payload"]["code"] == "DIV_BY_ZERO"
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_concurrent_rpcs(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            results = await asyncio.gather(
                *(client.rpc("ordering", "add", {"n": i}) for i in range(10))
            )
            for expected, res in enumerate(results):
                assert res["ok"] is True
                assert res["payload"]["n"] == expected
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_rpc_on_closed_transport(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        await client.transport.close()

        res = await client.rpc("test", "add", {"n": 1})
        assert res["ok"] is False
        assert res["payload"]["code"] == "UNEXPECTED_DISCONNECT"

    @pytest.mark.asyncio
    async def test_binary_echo(self, codec_and_url: tuple[Codec, str]):
        """Binary roundtrip — data passes through codec correctly."""
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            payload = b"\x00\x01\x02\xff\xfe\xfd"
            res = await client.rpc("test", "echoBinary", {"data": payload})
            assert res["ok"] is True
            assert res["payload"]["length"] == len(payload)
            echoed = res["payload"]["data"]
            # JSON codec may deliver a non-bytes representation; only
            # compare content when the codec preserved the byte type.
            if isinstance(echoed, (bytes, bytearray)):
                assert bytes(echoed) == payload
        finally:
            await cleanup(client)
class TestStreamEquivalence:
    @pytest.mark.asyncio
    async def test_basic_echo_stream(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            stream = client.stream("test", "echo", {})
            payloads = (
                ("hello", False),
                ("world", False),
                ("skip", True),
                ("end", False),
            )
            for text, skip in payloads:
                stream.req_writable.write({"msg": text, "ignore": skip})
            stream.req_writable.close()

            got = await stream.res_readable.collect()
            # Messages flagged ignore=True are dropped server-side.
            assert [r["payload"]["response"] for r in got] == ["hello", "world", "end"]
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_stream_with_init_message(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            stream = client.stream("test", "echoWithPrefix", {"prefix": "pfx"})
            for word in ("hello", "world"):
                stream.req_writable.write({"msg": word, "ignore": False})
            stream.req_writable.close()

            got = await stream.res_readable.collect()
            assert [r["payload"]["response"] for r in got] == ["pfx hello", "pfx world"]
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_empty_stream(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            stream = client.stream("test", "echo", {})
            stream.req_writable.close()
            assert await stream.res_readable.collect() == []
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_fallible_stream_ok(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            stream = client.stream("fallible", "echo", {})
            stream.req_writable.write(
                {"msg": "hi", "throwResult": False, "throwError": False}
            )
            done, msg = await stream.res_readable.next()
            assert not done
            assert msg["ok"] is True
            assert msg["payload"]["response"] == "hi"
            stream.req_writable.close()
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_fallible_stream_err(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            stream = client.stream("fallible", "echo", {})
            stream.req_writable.write(
                {"msg": "fail", "throwResult": True, "throwError": False}
            )
            done, msg = await stream.res_readable.next()
            assert not done
            assert msg["ok"] is False
            assert msg["payload"]["code"] == "STREAM_ERROR"
            stream.req_writable.close()
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_fallible_stream_uncaught(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            stream = client.stream("fallible", "echo", {})
            stream.req_writable.write(
                {"msg": "throw", "throwResult": False, "throwError": True}
            )
            done, msg = await stream.res_readable.next()
            assert not done
            assert msg["ok"] is False
            # An uncaught handler exception tears the stream down server-side,
            # so no explicit close of the writable is performed here.
            assert msg["payload"]["code"] == "UNCAUGHT_ERROR"
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_concurrent_streams(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            streams = [client.stream("test", "echo", {}) for _ in range(5)]

            for idx, s in enumerate(streams):
                s.req_writable.write({"msg": f"msg-{idx}", "ignore": False})
                s.req_writable.close()

            for idx, s in enumerate(streams):
                got = await s.res_readable.collect()
                assert len(got) == 1
                assert got[0]["payload"]["response"] == f"msg-{idx}"
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_stream_on_closed_transport(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        await client.transport.close()

        stream = client.stream("test", "echo", {})
        done, msg = await stream.res_readable.next()
        assert not done
        assert msg["ok"] is False
        assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT"
class TestUploadEquivalence:
    @pytest.mark.asyncio
    async def test_basic_upload(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            up = client.upload("uploadable", "addMultiple", {})
            for value in (1, 2):
                up.req_writable.write({"n": value})
            up.req_writable.close()

            res = await up.finalize()
            assert res["ok"] is True
            assert res["payload"]["result"] == 3
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_empty_upload(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            up = client.upload("uploadable", "addMultiple", {})
            up.req_writable.close()

            res = await up.finalize()
            assert res["ok"] is True
            # No messages written → the sum is the additive identity.
            assert res["payload"]["result"] == 0
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_upload_with_init_message(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            up = client.upload(
                "uploadable", "addMultipleWithPrefix", {"prefix": "total"}
            )
            for value in (5, 7):
                up.req_writable.write({"n": value})
            up.req_writable.close()

            res = await up.finalize()
            assert res["ok"] is True
            assert res["payload"]["result"] == "total 12"
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_upload_server_cancel(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            up = client.upload("uploadable", "cancellableAdd", {})
            up.req_writable.write({"n": 9})
            up.req_writable.write({"n": 1})

            res = await up.finalize()
            assert res["ok"] is False
            assert res["payload"]["code"] == "CANCEL"
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_upload_finalize_auto_closes(self, codec_and_url: tuple[Codec, str]):
        """finalize() auto-closes writable if not yet closed."""
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            up = client.upload("uploadable", "addMultiple", {})
            up.req_writable.write({"n": 4})
            res = await up.finalize()
            assert res["ok"] is True
            assert res["payload"]["result"] == 4
            assert not up.req_writable.is_writable()
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_upload_on_closed_transport(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        await client.transport.close()

        up = client.upload("uploadable", "addMultiple", {})
        assert not up.req_writable.is_writable()
        res = await up.finalize()
        assert res["ok"] is False
        assert res["payload"]["code"] == "UNEXPECTED_DISCONNECT"
class TestSubscriptionEquivalence:
    @pytest.mark.asyncio
    async def test_subscription_initial_and_update(
        self, codec_and_url: tuple[Codec, str]
    ):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            sub = client.subscribe("subscribable", "value", {})

            # Subscriptions push the current value immediately on open.
            done, msg = await sub.res_readable.next()
            assert not done
            assert msg["ok"] is True
            before = msg["payload"]["count"]

            bump = await client.rpc("subscribable", "add", {"n": 1})
            assert bump["ok"] is True

            done, msg = await sub.res_readable.next()
            assert not done
            assert msg["ok"] is True
            assert msg["payload"]["count"] == before + 1
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_subscription_abort(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            abort_evt = asyncio.Event()
            sub = client.subscribe("subscribable", "value", {}, abort_signal=abort_evt)

            done, msg = await sub.res_readable.next()
            assert not done
            assert msg["ok"] is True

            abort_evt.set()
            await asyncio.sleep(0.05)

            done, msg = await sub.res_readable.next()
            assert not done
            assert msg["ok"] is False
            assert msg["payload"]["code"] == "CANCEL"
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_subscription_on_closed_transport(
        self, codec_and_url: tuple[Codec, str]
    ):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        await client.transport.close()

        sub = client.subscribe("subscribable", "value", {})
        done, msg = await sub.res_readable.next()
        assert not done
        assert msg["ok"] is False
        assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT"
class TestCancellationEquivalence:
    """Client-side cancellation behaves identically across codecs."""

    @pytest.mark.asyncio
    async def test_cancel_rpc(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            abort_evt = asyncio.Event()

            async def trigger():
                # Fire the abort signal while the RPC is still blocked.
                await asyncio.sleep(0.2)
                abort_evt.set()

            # Hold a strong reference to the timer task: the event loop keeps
            # only weak references to tasks, so the bare ensure_future() result
            # previously used here could be garbage-collected before firing,
            # leaving the RPC blocked forever (flaky test).
            trigger_task = asyncio.create_task(trigger())
            result = await client.rpc(
                "cancel", "blockingRpc", {}, abort_signal=abort_evt
            )
            assert result["ok"] is False
            assert result["payload"]["code"] == "CANCEL"
            # The RPC only returns after the abort fired, so this is instant.
            await trigger_task
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_cancel_stream(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            abort_evt = asyncio.Event()
            stream = client.stream(
                "cancel", "blockingStream", {}, abort_signal=abort_evt
            )
            await asyncio.sleep(0.2)
            abort_evt.set()
            # Yield once so the abort watcher runs before we collect.
            await asyncio.sleep(0)

            results = await stream.res_readable.collect()
            assert len(results) == 1
            assert results[0]["ok"] is False
            assert results[0]["payload"]["code"] == "CANCEL"
            # Cancellation also tears down the request side.
            assert not stream.req_writable.is_writable()
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_cancel_upload(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            abort_evt = asyncio.Event()
            upload = client.upload(
                "cancel", "blockingUpload", {}, abort_signal=abort_evt
            )
            await asyncio.sleep(0.2)
            abort_evt.set()

            result = await upload.finalize()
            assert result["ok"] is False
            assert result["payload"]["code"] == "CANCEL"
            assert not upload.req_writable.is_writable()
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_cancel_subscription(self, codec_and_url: tuple[Codec, str]):
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            abort_evt = asyncio.Event()
            sub = client.subscribe(
                "cancel", "blockingSubscription", {}, abort_signal=abort_evt
            )
            await asyncio.sleep(0.2)
            abort_evt.set()
            await asyncio.sleep(0)

            done, msg = await sub.res_readable.next()
            assert not done
            assert msg["ok"] is False
            assert msg["payload"]["code"] == "CANCEL"
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_cancel_after_completion_is_noop(
        self, codec_and_url: tuple[Codec, str]
    ):
        """Cancelling after the procedure completed doesn't crash."""
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            abort_evt = asyncio.Event()
            result = await client.rpc(
                "cancel", "immediateRpc", {}, abort_signal=abort_evt
            )
            assert result["ok"] is True
            assert result["payload"]["done"] is True

            # Signal after completion; the abort watcher must ignore it.
            abort_evt.set()
            await asyncio.sleep(0.05)
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_cancel_after_transport_close_is_safe(
        self, codec_and_url: tuple[Codec, str]
    ):
        """Cancelling after transport close doesn't crash."""
        codec, url = codec_and_url
        client = await make_client(url, codec)
        abort_evt = asyncio.Event()
        await client.rpc("cancel", "immediateRpc", {}, abort_signal=abort_evt)
        await client.transport.close()

        abort_evt.set()
        await asyncio.sleep(0.05)
class TestDisconnectEquivalence:
    """Disconnect handling is codec-independent."""

    @pytest.mark.asyncio
    async def test_all_proc_types_on_closed_transport(
        self, codec_and_url: tuple[Codec, str]
    ):
        """All 4 procedure types return UNEXPECTED_DISCONNECT on closed transport."""
        codec, url = codec_and_url
        client = await make_client(url, codec)
        # Close before any invocation; every call below must fail fast.
        await client.transport.close()

        # rpc
        result = await client.rpc("test", "add", {"n": 1})
        assert result["ok"] is False
        assert result["payload"]["code"] == "UNEXPECTED_DISCONNECT"

        # stream
        stream = client.stream("test", "echo", {})
        done, msg = await stream.res_readable.next()
        assert msg["ok"] is False
        assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT"

        # upload
        upload = client.upload("uploadable", "addMultiple", {})
        uresult = await upload.finalize()
        assert uresult["ok"] is False
        assert uresult["payload"]["code"] == "UNEXPECTED_DISCONNECT"

        # subscription
        sub = client.subscribe("subscribable", "value", {})
        done, msg = await sub.res_readable.next()
        assert msg["ok"] is False
        assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT"

    @pytest.mark.asyncio
    async def test_mid_stream_disconnect(self, codec_and_url: tuple[Codec, str]):
        """Force-closing the WS mid-stream produces disconnect error."""
        codec, url = codec_and_url
        # Short grace period so the destroyed session is observed quickly.
        short_opts = SessionOptions(session_disconnect_grace_ms=200)
        client = await make_client(url, codec, options=short_opts)
        try:
            # Disable reconnect so session gets destroyed
            client.transport.reconnect_on_connection_drop = False

            stream = client.stream("test", "echo", {})
            stream.req_writable.write({"msg": "before", "ignore": False})

            done, msg = await stream.res_readable.next()
            assert not done
            assert msg["ok"] is True
            assert msg["payload"]["response"] == "before"

            # Reach into session internals to kill the raw WebSocket
            # out from under the stream (simulates a network drop).
            session = client.transport.sessions.get("SERVER")
            assert session is not None
            if session._ws is not None:
                await session._ws.close()

            # Wait for short grace period to expire (200ms + margin).
            await asyncio.sleep(0.4)

            # Session destroyed → stream gets UNEXPECTED_DISCONNECT.
            # done may already be True if the readable closed first.
            done, msg = await stream.res_readable.next()
            if not done:
                assert msg["ok"] is False
                assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT"
        finally:
            await cleanup(client)
class TestOrderingEquivalence:
    @pytest.mark.asyncio
    async def test_concurrent_rpc_ordering(self, codec_and_url: tuple[Codec, str]):
        """N concurrent RPCs to ordering service all arrive, responses match."""
        codec, url = codec_and_url
        client = await make_client(url, codec)
        try:
            n = 20
            results = await asyncio.gather(
                *(client.rpc("ordering", "add", {"n": i}) for i in range(n))
            )

            seen = []
            for res in results:
                assert res["ok"] is True
                seen.append(res["payload"]["n"])

            # Every request was answered exactly once; gather preserves order
            # of submission, so only set-equality of payloads is asserted.
            assert sorted(seen) == list(range(n))
        finally:
            await cleanup(client)
async def make_handshake_client(
    server_url: str,
    handshake_metadata: dict | None = None,
) -> RiverClient:
    """Build a client targeting the handshake test server.

    That server validates {token: string} in the handshake metadata.
    """
    transport = WebSocketClientTransport(
        ws_url=server_url,
        client_id=None,
        server_id="HANDSHAKE_SERVER",
        codec=NaiveJsonCodec(),
        handshake_metadata=handshake_metadata,
    )
    return RiverClient(
        transport,
        server_id="HANDSHAKE_SERVER",
        connect_on_invoke=True,
        eagerly_connect=False,
    )


async def cleanup(client: RiverClient) -> None:
    """Tear down the client's underlying transport."""
    await client.transport.close()


class TestHandshake:
    @pytest.mark.asyncio
    async def test_handshake_with_valid_metadata(self, handshake_server_url: str):
        """Client with valid handshake metadata can make RPCs."""
        client = await make_handshake_client(
            handshake_server_url,
            handshake_metadata={"token": "valid-token"},
        )
        try:
            res = await client.rpc("test", "echo", {"msg": "hello"})
            assert res["ok"] is True
            assert res["payload"]["response"] == "hello"
        finally:
            await cleanup(client)

    @pytest.mark.asyncio
    async def test_handshake_with_invalid_metadata_emits_error(
        self, handshake_server_url: str
    ):
        """Client with invalid token triggers a protocolError event."""
        transport = WebSocketClientTransport(
            ws_url=handshake_server_url,
            client_id=None,
            server_id="HANDSHAKE_SERVER",
            codec=NaiveJsonCodec(),
            handshake_metadata={"token": "wrong-token"},
        )

        got_error = asyncio.Event()
        seen: list[dict] = []

        def on_error(evt: dict) -> None:
            seen.append(evt)
            got_error.set()

        transport.add_event_listener("protocolError", on_error)

        try:
            transport.connect("HANDSHAKE_SERVER")
            # Wait for the handshake failure to surface as an event.
            await asyncio.wait_for(got_error.wait(), timeout=5.0)
            assert seen
            assert seen[0]["type"] in ("handshake_failed", "conn_retry_exceeded")
        finally:
            await transport.close()

    @pytest.mark.asyncio
    async def test_handshake_with_missing_metadata_emits_error(
        self, handshake_server_url: str
    ):
        """Client with no metadata triggers a protocolError event."""
        transport = WebSocketClientTransport(
            ws_url=handshake_server_url,
            client_id=None,
            server_id="HANDSHAKE_SERVER",
            codec=NaiveJsonCodec(),
            handshake_metadata=None,
        )

        got_error = asyncio.Event()
        seen: list[dict] = []

        def on_error(evt: dict) -> None:
            seen.append(evt)
            got_error.set()

        transport.add_event_listener("protocolError", on_error)

        try:
            transport.connect("HANDSHAKE_SERVER")
            await asyncio.wait_for(got_error.wait(), timeout=5.0)
            assert seen
        finally:
            await transport.close()

    @pytest.mark.asyncio
    async def test_handshake_metadata_across_reconnect(self, handshake_server_url: str):
        """Metadata is resent when reconnecting."""
        client = await make_handshake_client(
            handshake_server_url,
            handshake_metadata={"token": "valid-token"},
        )
        try:
            res = await client.rpc("test", "echo", {"msg": "first"})
            assert res["ok"] is True

            session = client.transport.sessions.get("HANDSHAKE_SERVER")
            assert session is not None

            # Kill the raw socket; the transport should transparently
            # reconnect and replay the same handshake metadata.
            ws = session._ws
            if ws is not None:
                await ws.close()

            await asyncio.sleep(0.5)

            res = await client.rpc("test", "echo", {"msg": "after-reconnect"})
            assert res["ok"] is True
            assert res["payload"]["response"] == "after-reconnect"
        finally:
            await cleanup(client)
echoBinary: Procedure.rpc({
  requestInit: Type.Object({ data: Type.Uint8Array() }),
  responseData: Type.Object({ data: Type.Uint8Array(), length: Type.Number() }),
  responseError: Type.Never(),
  async handler({ reqInit }) {
    // Echo the raw bytes back along with their length so the client can
    // verify binary payloads survive the codec roundtrip unchanged.
    const { data } = reqInit;
    return Ok({ data, length: data.length });
  },
}),
// Vendored nanoid: a pooled CSPRNG byte source feeding an
// alphabet-masked ID builder.
function fillPool(byteCount) {
  if (!pool || pool.length < byteCount) {
    // (Re)allocate a pool many times larger than one request so most
    // calls are served without touching the CSPRNG again.
    pool = Buffer.allocUnsafe(byteCount * POOL_SIZE_MULTIPLIER);
    crypto.getRandomValues(pool);
    poolOffset = 0;
  } else if (poolOffset + byteCount > pool.length) {
    crypto.getRandomValues(pool);
    poolOffset = 0;
  }
  poolOffset += byteCount;
}

function random(bytes) {
  fillPool((bytes |= 0));
  return pool.subarray(poolOffset - bytes, poolOffset);
}

function customRandom(chars, defaultSize, getRandom) {
  // Smallest all-ones bitmask that covers every alphabet index.
  const mask = (2 << (31 - Math.clz32((chars.length - 1) | 1))) - 1;
  // Estimated random bytes needed per generated ID (with slack).
  const step = Math.ceil((1.6 * mask * defaultSize) / chars.length);
  return (size = defaultSize) => {
    let id = "";
    for (;;) {
      const bytes = getRandom(step);
      for (let i = step - 1; i >= 0; i -= 1) {
        // Masked bytes past the alphabet map to undefined → appended as "".
        id += chars[bytes[i] & mask] || "";
        if (id.length >= size) return id;
      }
    }
  };
}

function customAlphabet(chars, size = 21) {
  return customRandom(chars, size, random);
}

// transport/id.ts
var alphabet = customAlphabet(
  "1234567890abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVXYZ"
);
var generateId = () => alphabet(12);
// transport/connection.ts
var Connection = class {
  id;
  telemetry;

  constructor() {
    this.id = `conn-${generateId()}`;
  }

  get loggingMetadata() {
    const metadata = { connId: this.id };
    // Only attach trace identifiers while the span is actively recording.
    if (this.telemetry?.span.isRecording()) {
      const { traceId, spanId } = this.telemetry.span.spanContext();
      metadata.telemetry = { traceId, spanId };
    }
    return metadata;
  }

  dataListener;
  closeListener;
  errorListener;

  onData(msg) {
    this.dataListener?.(msg);
  }

  onError(err) {
    this.errorListener?.(err);
  }

  onClose() {
    this.closeListener?.();
    // End the telemetry span once the connection is fully closed.
    this.telemetry?.span.end();
  }

  /**
   * Set the callback for when a message is received.
   * @param cb The message handler callback.
   */
  setDataListener(cb) {
    this.dataListener = cb;
  }

  removeDataListener() {
    this.dataListener = void 0;
  }

  /**
   * Set the callback for when the connection is closed.
   * This should also be called if an error happens and after notifying the error listener.
   * @param cb The callback to call when the connection is closed.
   */
  setCloseListener(cb) {
    this.closeListener = cb;
  }

  removeCloseListener() {
    this.closeListener = void 0;
  }

  /**
   * Set the callback for when an error is received.
   * This should only be used for logging errors, all cleanup
   * should be delegated to setCloseListener.
   *
   * The implementer should take care such that the implemented
   * connection will call both the close and error callbacks
   * on an error.
   *
   * @param cb The callback to call when an error is received.
   */
  setErrorListener(cb) {
    this.errorListener = cb;
  }

  removeErrorListener() {
    this.errorListener = void 0;
  }
};
// transport/impls/ws/connection.ts
var WS_HEALTHY_CLOSE_CODE = 1000;

var WebSocketCloseError = class extends Error {
  code;
  reason;

  constructor(code, reason) {
    super(`websocket closed with code and reason: ${code} - ${reason}`);
    this.code = code;
    this.reason = reason;
  }
};

var WebSocketConnection = class extends Connection {
  ws;
  extras;

  get loggingMetadata() {
    const metadata = super.loggingMetadata;
    if (this.extras) {
      metadata.extras = this.extras;
    }
    return metadata;
  }

  constructor(ws, extras) {
    super();
    this.ws = ws;
    this.extras = extras;
    this.ws.binaryType = "arraybuffer";

    // WebSockets fire `error` before `close`; remember the error so the
    // close handler can surface it through onError() ahead of onClose().
    let sawError = false;
    this.ws.onerror = () => {
      sawError = true;
    };
    this.ws.onclose = ({ code, reason }) => {
      if (sawError) {
        this.onError(new WebSocketCloseError(code, reason));
      }
      this.onClose();
    };
    this.ws.onmessage = (event) => {
      this.onData(event.data);
    };
  }

  // Returns false instead of throwing when the socket rejects the write.
  send(payload) {
    try {
      this.ws.send(payload);
      return true;
    } catch {
      return false;
    }
  }

  close() {
    this.ws.close(WS_HEALTHY_CLOSE_CODE);
  }
};
// node_modules/@opentelemetry/api/build/esm/internal/semver.js
var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;

// Build a predicate that decides whether a globally-registered API version
// is compatible with `ownVersion`. Verdicts are memoized per version string.
function _makeCompatibilityCheck(ownVersion) {
  var acceptedVersions = new Set([ownVersion]);
  var rejectedVersions = new Set();

  var ownMatch = ownVersion.match(re);
  if (!ownMatch) {
    // Own version is not valid semver: nothing can be compatible.
    return function () {
      return false;
    };
  }

  var own = {
    major: +ownMatch[1],
    minor: +ownMatch[2],
    patch: +ownMatch[3],
    prerelease: ownMatch[4]
  };

  if (own.prerelease != null) {
    // Prerelease builds only match themselves exactly.
    return function isExactmatch(globalVersion) {
      return globalVersion === ownVersion;
    };
  }

  function _reject(v) {
    rejectedVersions.add(v);
    return false;
  }

  function _accept(v) {
    acceptedVersions.add(v);
    return true;
  }

  return function isCompatible2(globalVersion) {
    if (acceptedVersions.has(globalVersion)) {
      return true;
    }
    if (rejectedVersions.has(globalVersion)) {
      return false;
    }

    var globalMatch = globalVersion.match(re);
    if (!globalMatch) {
      return _reject(globalVersion);
    }

    var global_ = {
      major: +globalMatch[1],
      minor: +globalMatch[2],
      patch: +globalMatch[3],
      prerelease: globalMatch[4]
    };
    if (global_.prerelease != null) {
      // Never accept a prerelease global.
      return _reject(globalVersion);
    }

    if (own.major !== global_.major) {
      return _reject(globalVersion);
    }

    if (own.major === 0) {
      // 0.x: minor is breaking; patch must be >= our own.
      if (own.minor === global_.minor && own.patch <= global_.patch) {
        return _accept(globalVersion);
      }
      return _reject(globalVersion);
    }

    // >= 1.x: any global with at least our minor is compatible.
    if (own.minor <= global_.minor) {
      return _accept(globalVersion);
    }
    return _reject(globalVersion);
  };
}
void 0 : _b[type]; +} +function unregisterGlobal(type, diag2) { + diag2.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + "."); + var api = _global[GLOBAL_OPENTELEMETRY_API_KEY]; + if (api) { + delete api[type]; + } +} + +// node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js +var __read = function(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; + if (!m) return o; + var i = m.call(o), r, ar = [], e; + try { + while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); + } catch (error) { + e = { error }; + } finally { + try { + if (r && !r.done && (m = i["return"])) m.call(i); + } finally { + if (e) throw e.error; + } + } + return ar; +}; +var __spreadArray = function(to, from, pack) { + if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; + } + } + return to.concat(ar || Array.prototype.slice.call(from)); +}; +var DiagComponentLogger = ( + /** @class */ + function() { + function DiagComponentLogger2(props) { + this._namespace = props.namespace || "DiagComponentLogger"; + } + DiagComponentLogger2.prototype.debug = function() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + return logProxy("debug", this._namespace, args); + }; + DiagComponentLogger2.prototype.error = function() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + return logProxy("error", this._namespace, args); + }; + DiagComponentLogger2.prototype.info = function() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + return logProxy("info", this._namespace, args); + }; + DiagComponentLogger2.prototype.warn = function() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + return 
logProxy("warn", this._namespace, args); + }; + DiagComponentLogger2.prototype.verbose = function() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + return logProxy("verbose", this._namespace, args); + }; + return DiagComponentLogger2; + }() +); +function logProxy(funcName, namespace, args) { + var logger = getGlobal("diag"); + if (!logger) { + return; + } + args.unshift(namespace); + return logger[funcName].apply(logger, __spreadArray([], __read(args), false)); +} + +// node_modules/@opentelemetry/api/build/esm/diag/types.js +var DiagLogLevel; +(function(DiagLogLevel2) { + DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE"; + DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR"; + DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN"; + DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO"; + DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG"; + DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE"; + DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL"; +})(DiagLogLevel || (DiagLogLevel = {})); + +// node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js +function createLogLevelDiagLogger(maxLevel, logger) { + if (maxLevel < DiagLogLevel.NONE) { + maxLevel = DiagLogLevel.NONE; + } else if (maxLevel > DiagLogLevel.ALL) { + maxLevel = DiagLogLevel.ALL; + } + logger = logger || {}; + function _filterFunc(funcName, theLevel) { + var theFunc = logger[funcName]; + if (typeof theFunc === "function" && maxLevel >= theLevel) { + return theFunc.bind(logger); + } + return function() { + }; + } + return { + error: _filterFunc("error", DiagLogLevel.ERROR), + warn: _filterFunc("warn", DiagLogLevel.WARN), + info: _filterFunc("info", DiagLogLevel.INFO), + debug: _filterFunc("debug", DiagLogLevel.DEBUG), + verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE) + }; +} + +// node_modules/@opentelemetry/api/build/esm/api/diag.js +var __read2 = function(o, n) { + var m = typeof Symbol === "function" && 
o[Symbol.iterator]; + if (!m) return o; + var i = m.call(o), r, ar = [], e; + try { + while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); + } catch (error) { + e = { error }; + } finally { + try { + if (r && !r.done && (m = i["return"])) m.call(i); + } finally { + if (e) throw e.error; + } + } + return ar; +}; +var __spreadArray2 = function(to, from, pack) { + if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; + } + } + return to.concat(ar || Array.prototype.slice.call(from)); +}; +var API_NAME = "diag"; +var DiagAPI = ( + /** @class */ + function() { + function DiagAPI2() { + function _logProxy(funcName) { + return function() { + var args = []; + for (var _i = 0; _i < arguments.length; _i++) { + args[_i] = arguments[_i]; + } + var logger = getGlobal("diag"); + if (!logger) + return; + return logger[funcName].apply(logger, __spreadArray2([], __read2(args), false)); + }; + } + var self = this; + var setLogger = function(logger, optionsOrLogLevel) { + var _a, _b, _c; + if (optionsOrLogLevel === void 0) { + optionsOrLogLevel = { logLevel: DiagLogLevel.INFO }; + } + if (logger === self) { + var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation"); + self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message); + return false; + } + if (typeof optionsOrLogLevel === "number") { + optionsOrLogLevel = { + logLevel: optionsOrLogLevel + }; + } + var oldLogger = getGlobal("diag"); + var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger); + if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) { + var stack = (_c = new Error().stack) !== null && _c !== void 0 ? 
_c : ""; + oldLogger.warn("Current logger will be overwritten from " + stack); + newLogger.warn("Current logger will overwrite one already registered from " + stack); + } + return registerGlobal("diag", newLogger, self, true); + }; + self.setLogger = setLogger; + self.disable = function() { + unregisterGlobal(API_NAME, self); + }; + self.createComponentLogger = function(options) { + return new DiagComponentLogger(options); + }; + self.verbose = _logProxy("verbose"); + self.debug = _logProxy("debug"); + self.info = _logProxy("info"); + self.warn = _logProxy("warn"); + self.error = _logProxy("error"); + } + DiagAPI2.instance = function() { + if (!this._instance) { + this._instance = new DiagAPI2(); + } + return this._instance; + }; + return DiagAPI2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/baggage/internal/baggage-impl.js +var __read3 = function(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; + if (!m) return o; + var i = m.call(o), r, ar = [], e; + try { + while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); + } catch (error) { + e = { error }; + } finally { + try { + if (r && !r.done && (m = i["return"])) m.call(i); + } finally { + if (e) throw e.error; + } + } + return ar; +}; +var __values = function(o) { + var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0; + if (m) return m.call(o); + if (o && typeof o.length === "number") return { + next: function() { + if (o && i >= o.length) o = void 0; + return { value: o && o[i++], done: !o }; + } + }; + throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined."); +}; +var BaggageImpl = ( + /** @class */ + function() { + function BaggageImpl2(entries) { + this._entries = entries ? 
new Map(entries) : /* @__PURE__ */ new Map(); + } + BaggageImpl2.prototype.getEntry = function(key) { + var entry = this._entries.get(key); + if (!entry) { + return void 0; + } + return Object.assign({}, entry); + }; + BaggageImpl2.prototype.getAllEntries = function() { + return Array.from(this._entries.entries()).map(function(_a) { + var _b = __read3(_a, 2), k = _b[0], v = _b[1]; + return [k, v]; + }); + }; + BaggageImpl2.prototype.setEntry = function(key, entry) { + var newBaggage = new BaggageImpl2(this._entries); + newBaggage._entries.set(key, entry); + return newBaggage; + }; + BaggageImpl2.prototype.removeEntry = function(key) { + var newBaggage = new BaggageImpl2(this._entries); + newBaggage._entries.delete(key); + return newBaggage; + }; + BaggageImpl2.prototype.removeEntries = function() { + var e_1, _a; + var keys = []; + for (var _i = 0; _i < arguments.length; _i++) { + keys[_i] = arguments[_i]; + } + var newBaggage = new BaggageImpl2(this._entries); + try { + for (var keys_1 = __values(keys), keys_1_1 = keys_1.next(); !keys_1_1.done; keys_1_1 = keys_1.next()) { + var key = keys_1_1.value; + newBaggage._entries.delete(key); + } + } catch (e_1_1) { + e_1 = { error: e_1_1 }; + } finally { + try { + if (keys_1_1 && !keys_1_1.done && (_a = keys_1.return)) _a.call(keys_1); + } finally { + if (e_1) throw e_1.error; + } + } + return newBaggage; + }; + BaggageImpl2.prototype.clear = function() { + return new BaggageImpl2(); + }; + return BaggageImpl2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/baggage/utils.js +var diag = DiagAPI.instance(); +function createBaggage(entries) { + if (entries === void 0) { + entries = {}; + } + return new BaggageImpl(new Map(Object.entries(entries))); +} + +// node_modules/@opentelemetry/api/build/esm/context/context.js +function createContextKey(description) { + return Symbol.for(description); +} +var BaseContext = ( + /** @class */ + /* @__PURE__ */ function() { + function BaseContext2(parentContext) { + var self = 
this; + self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map(); + self.getValue = function(key) { + return self._currentContext.get(key); + }; + self.setValue = function(key, value) { + var context2 = new BaseContext2(self._currentContext); + context2._currentContext.set(key, value); + return context2; + }; + self.deleteValue = function(key) { + var context2 = new BaseContext2(self._currentContext); + context2._currentContext.delete(key); + return context2; + }; + } + return BaseContext2; + }() +); +var ROOT_CONTEXT = new BaseContext(); + +// node_modules/@opentelemetry/api/build/esm/propagation/TextMapPropagator.js +var defaultTextMapGetter = { + get: function(carrier, key) { + if (carrier == null) { + return void 0; + } + return carrier[key]; + }, + keys: function(carrier) { + if (carrier == null) { + return []; + } + return Object.keys(carrier); + } +}; +var defaultTextMapSetter = { + set: function(carrier, key, value) { + if (carrier == null) { + return; + } + carrier[key] = value; + } +}; + +// node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js +var __read4 = function(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; + if (!m) return o; + var i = m.call(o), r, ar = [], e; + try { + while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); + } catch (error) { + e = { error }; + } finally { + try { + if (r && !r.done && (m = i["return"])) m.call(i); + } finally { + if (e) throw e.error; + } + } + return ar; +}; +var __spreadArray3 = function(to, from, pack) { + if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; + } + } + return to.concat(ar || Array.prototype.slice.call(from)); +}; +var NoopContextManager = ( + /** @class */ + function() { + function NoopContextManager2() { + } + NoopContextManager2.prototype.active = function() { + 
return ROOT_CONTEXT; + }; + NoopContextManager2.prototype.with = function(_context, fn, thisArg) { + var args = []; + for (var _i = 3; _i < arguments.length; _i++) { + args[_i - 3] = arguments[_i]; + } + return fn.call.apply(fn, __spreadArray3([thisArg], __read4(args), false)); + }; + NoopContextManager2.prototype.bind = function(_context, target) { + return target; + }; + NoopContextManager2.prototype.enable = function() { + return this; + }; + NoopContextManager2.prototype.disable = function() { + return this; + }; + return NoopContextManager2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/api/context.js +var __read5 = function(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; + if (!m) return o; + var i = m.call(o), r, ar = [], e; + try { + while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); + } catch (error) { + e = { error }; + } finally { + try { + if (r && !r.done && (m = i["return"])) m.call(i); + } finally { + if (e) throw e.error; + } + } + return ar; +}; +var __spreadArray4 = function(to, from, pack) { + if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; + } + } + return to.concat(ar || Array.prototype.slice.call(from)); +}; +var API_NAME2 = "context"; +var NOOP_CONTEXT_MANAGER = new NoopContextManager(); +var ContextAPI = ( + /** @class */ + function() { + function ContextAPI2() { + } + ContextAPI2.getInstance = function() { + if (!this._instance) { + this._instance = new ContextAPI2(); + } + return this._instance; + }; + ContextAPI2.prototype.setGlobalContextManager = function(contextManager) { + return registerGlobal(API_NAME2, contextManager, DiagAPI.instance()); + }; + ContextAPI2.prototype.active = function() { + return this._getContextManager().active(); + }; + ContextAPI2.prototype.with = function(context2, fn, thisArg) { + var _a; + var args = []; + for 
(var _i = 3; _i < arguments.length; _i++) { + args[_i - 3] = arguments[_i]; + } + return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read5(args), false)); + }; + ContextAPI2.prototype.bind = function(context2, target) { + return this._getContextManager().bind(context2, target); + }; + ContextAPI2.prototype._getContextManager = function() { + return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER; + }; + ContextAPI2.prototype.disable = function() { + this._getContextManager().disable(); + unregisterGlobal(API_NAME2, DiagAPI.instance()); + }; + return ContextAPI2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/trace/trace_flags.js +var TraceFlags; +(function(TraceFlags2) { + TraceFlags2[TraceFlags2["NONE"] = 0] = "NONE"; + TraceFlags2[TraceFlags2["SAMPLED"] = 1] = "SAMPLED"; +})(TraceFlags || (TraceFlags = {})); + +// node_modules/@opentelemetry/api/build/esm/trace/invalid-span-constants.js +var INVALID_SPANID = "0000000000000000"; +var INVALID_TRACEID = "00000000000000000000000000000000"; +var INVALID_SPAN_CONTEXT = { + traceId: INVALID_TRACEID, + spanId: INVALID_SPANID, + traceFlags: TraceFlags.NONE +}; + +// node_modules/@opentelemetry/api/build/esm/trace/NonRecordingSpan.js +var NonRecordingSpan = ( + /** @class */ + function() { + function NonRecordingSpan2(_spanContext) { + if (_spanContext === void 0) { + _spanContext = INVALID_SPAN_CONTEXT; + } + this._spanContext = _spanContext; + } + NonRecordingSpan2.prototype.spanContext = function() { + return this._spanContext; + }; + NonRecordingSpan2.prototype.setAttribute = function(_key, _value) { + return this; + }; + NonRecordingSpan2.prototype.setAttributes = function(_attributes) { + return this; + }; + NonRecordingSpan2.prototype.addEvent = function(_name, _attributes) { + return this; + }; + NonRecordingSpan2.prototype.setStatus = function(_status) { + return this; + }; + NonRecordingSpan2.prototype.updateName = function(_name) { + return this; + }; + 
NonRecordingSpan2.prototype.end = function(_endTime) { + }; + NonRecordingSpan2.prototype.isRecording = function() { + return false; + }; + NonRecordingSpan2.prototype.recordException = function(_exception, _time) { + }; + return NonRecordingSpan2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/trace/context-utils.js +var SPAN_KEY = createContextKey("OpenTelemetry Context Key SPAN"); +function getSpan(context2) { + return context2.getValue(SPAN_KEY) || void 0; +} +function getActiveSpan() { + return getSpan(ContextAPI.getInstance().active()); +} +function setSpan(context2, span) { + return context2.setValue(SPAN_KEY, span); +} +function deleteSpan(context2) { + return context2.deleteValue(SPAN_KEY); +} +function setSpanContext(context2, spanContext) { + return setSpan(context2, new NonRecordingSpan(spanContext)); +} +function getSpanContext(context2) { + var _a; + return (_a = getSpan(context2)) === null || _a === void 0 ? void 0 : _a.spanContext(); +} + +// node_modules/@opentelemetry/api/build/esm/trace/spancontext-utils.js +var VALID_TRACEID_REGEX = /^([0-9a-f]{32})$/i; +var VALID_SPANID_REGEX = /^[0-9a-f]{16}$/i; +function isValidTraceId(traceId) { + return VALID_TRACEID_REGEX.test(traceId) && traceId !== INVALID_TRACEID; +} +function isValidSpanId(spanId) { + return VALID_SPANID_REGEX.test(spanId) && spanId !== INVALID_SPANID; +} +function isSpanContextValid(spanContext) { + return isValidTraceId(spanContext.traceId) && isValidSpanId(spanContext.spanId); +} +function wrapSpanContext(spanContext) { + return new NonRecordingSpan(spanContext); +} + +// node_modules/@opentelemetry/api/build/esm/trace/NoopTracer.js +var contextApi = ContextAPI.getInstance(); +var NoopTracer = ( + /** @class */ + function() { + function NoopTracer2() { + } + NoopTracer2.prototype.startSpan = function(name, options, context2) { + if (context2 === void 0) { + context2 = contextApi.active(); + } + var root = Boolean(options === null || options === void 0 ? 
void 0 : options.root); + if (root) { + return new NonRecordingSpan(); + } + var parentFromContext = context2 && getSpanContext(context2); + if (isSpanContext(parentFromContext) && isSpanContextValid(parentFromContext)) { + return new NonRecordingSpan(parentFromContext); + } else { + return new NonRecordingSpan(); + } + }; + NoopTracer2.prototype.startActiveSpan = function(name, arg2, arg3, arg4) { + var opts; + var ctx; + var fn; + if (arguments.length < 2) { + return; + } else if (arguments.length === 2) { + fn = arg2; + } else if (arguments.length === 3) { + opts = arg2; + fn = arg3; + } else { + opts = arg2; + ctx = arg3; + fn = arg4; + } + var parentContext = ctx !== null && ctx !== void 0 ? ctx : contextApi.active(); + var span = this.startSpan(name, opts, parentContext); + var contextWithSpanSet = setSpan(parentContext, span); + return contextApi.with(contextWithSpanSet, fn, void 0, span); + }; + return NoopTracer2; + }() +); +function isSpanContext(spanContext) { + return typeof spanContext === "object" && typeof spanContext["spanId"] === "string" && typeof spanContext["traceId"] === "string" && typeof spanContext["traceFlags"] === "number"; +} + +// node_modules/@opentelemetry/api/build/esm/trace/ProxyTracer.js +var NOOP_TRACER = new NoopTracer(); +var ProxyTracer = ( + /** @class */ + function() { + function ProxyTracer2(_provider, name, version2, options) { + this._provider = _provider; + this.name = name; + this.version = version2; + this.options = options; + } + ProxyTracer2.prototype.startSpan = function(name, options, context2) { + return this._getTracer().startSpan(name, options, context2); + }; + ProxyTracer2.prototype.startActiveSpan = function(_name, _options, _context, _fn) { + var tracer = this._getTracer(); + return Reflect.apply(tracer.startActiveSpan, tracer, arguments); + }; + ProxyTracer2.prototype._getTracer = function() { + if (this._delegate) { + return this._delegate; + } + var tracer = this._provider.getDelegateTracer(this.name, 
this.version, this.options); + if (!tracer) { + return NOOP_TRACER; + } + this._delegate = tracer; + return this._delegate; + }; + return ProxyTracer2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/trace/NoopTracerProvider.js +var NoopTracerProvider = ( + /** @class */ + function() { + function NoopTracerProvider2() { + } + NoopTracerProvider2.prototype.getTracer = function(_name, _version, _options) { + return new NoopTracer(); + }; + return NoopTracerProvider2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/trace/ProxyTracerProvider.js +var NOOP_TRACER_PROVIDER = new NoopTracerProvider(); +var ProxyTracerProvider = ( + /** @class */ + function() { + function ProxyTracerProvider2() { + } + ProxyTracerProvider2.prototype.getTracer = function(name, version2, options) { + var _a; + return (_a = this.getDelegateTracer(name, version2, options)) !== null && _a !== void 0 ? _a : new ProxyTracer(this, name, version2, options); + }; + ProxyTracerProvider2.prototype.getDelegate = function() { + var _a; + return (_a = this._delegate) !== null && _a !== void 0 ? _a : NOOP_TRACER_PROVIDER; + }; + ProxyTracerProvider2.prototype.setDelegate = function(delegate) { + this._delegate = delegate; + }; + ProxyTracerProvider2.prototype.getDelegateTracer = function(name, version2, options) { + var _a; + return (_a = this._delegate) === null || _a === void 0 ? 
void 0 : _a.getTracer(name, version2, options); + }; + return ProxyTracerProvider2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/trace/span_kind.js +var SpanKind; +(function(SpanKind2) { + SpanKind2[SpanKind2["INTERNAL"] = 0] = "INTERNAL"; + SpanKind2[SpanKind2["SERVER"] = 1] = "SERVER"; + SpanKind2[SpanKind2["CLIENT"] = 2] = "CLIENT"; + SpanKind2[SpanKind2["PRODUCER"] = 3] = "PRODUCER"; + SpanKind2[SpanKind2["CONSUMER"] = 4] = "CONSUMER"; +})(SpanKind || (SpanKind = {})); + +// node_modules/@opentelemetry/api/build/esm/trace/status.js +var SpanStatusCode; +(function(SpanStatusCode2) { + SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET"; + SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK"; + SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR"; +})(SpanStatusCode || (SpanStatusCode = {})); + +// node_modules/@opentelemetry/api/build/esm/context-api.js +var context = ContextAPI.getInstance(); + +// node_modules/@opentelemetry/api/build/esm/propagation/NoopTextMapPropagator.js +var NoopTextMapPropagator = ( + /** @class */ + function() { + function NoopTextMapPropagator2() { + } + NoopTextMapPropagator2.prototype.inject = function(_context, _carrier) { + }; + NoopTextMapPropagator2.prototype.extract = function(context2, _carrier) { + return context2; + }; + NoopTextMapPropagator2.prototype.fields = function() { + return []; + }; + return NoopTextMapPropagator2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/baggage/context-helpers.js +var BAGGAGE_KEY = createContextKey("OpenTelemetry Baggage Key"); +function getBaggage(context2) { + return context2.getValue(BAGGAGE_KEY) || void 0; +} +function getActiveBaggage() { + return getBaggage(ContextAPI.getInstance().active()); +} +function setBaggage(context2, baggage) { + return context2.setValue(BAGGAGE_KEY, baggage); +} +function deleteBaggage(context2) { + return context2.deleteValue(BAGGAGE_KEY); +} + +// node_modules/@opentelemetry/api/build/esm/api/propagation.js +var API_NAME3 = 
"propagation"; +var NOOP_TEXT_MAP_PROPAGATOR = new NoopTextMapPropagator(); +var PropagationAPI = ( + /** @class */ + function() { + function PropagationAPI2() { + this.createBaggage = createBaggage; + this.getBaggage = getBaggage; + this.getActiveBaggage = getActiveBaggage; + this.setBaggage = setBaggage; + this.deleteBaggage = deleteBaggage; + } + PropagationAPI2.getInstance = function() { + if (!this._instance) { + this._instance = new PropagationAPI2(); + } + return this._instance; + }; + PropagationAPI2.prototype.setGlobalPropagator = function(propagator) { + return registerGlobal(API_NAME3, propagator, DiagAPI.instance()); + }; + PropagationAPI2.prototype.inject = function(context2, carrier, setter) { + if (setter === void 0) { + setter = defaultTextMapSetter; + } + return this._getGlobalPropagator().inject(context2, carrier, setter); + }; + PropagationAPI2.prototype.extract = function(context2, carrier, getter) { + if (getter === void 0) { + getter = defaultTextMapGetter; + } + return this._getGlobalPropagator().extract(context2, carrier, getter); + }; + PropagationAPI2.prototype.fields = function() { + return this._getGlobalPropagator().fields(); + }; + PropagationAPI2.prototype.disable = function() { + unregisterGlobal(API_NAME3, DiagAPI.instance()); + }; + PropagationAPI2.prototype._getGlobalPropagator = function() { + return getGlobal(API_NAME3) || NOOP_TEXT_MAP_PROPAGATOR; + }; + return PropagationAPI2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/propagation-api.js +var propagation = PropagationAPI.getInstance(); + +// node_modules/@opentelemetry/api/build/esm/api/trace.js +var API_NAME4 = "trace"; +var TraceAPI = ( + /** @class */ + function() { + function TraceAPI2() { + this._proxyTracerProvider = new ProxyTracerProvider(); + this.wrapSpanContext = wrapSpanContext; + this.isSpanContextValid = isSpanContextValid; + this.deleteSpan = deleteSpan; + this.getSpan = getSpan; + this.getActiveSpan = getActiveSpan; + this.getSpanContext = 
getSpanContext; + this.setSpan = setSpan; + this.setSpanContext = setSpanContext; + } + TraceAPI2.getInstance = function() { + if (!this._instance) { + this._instance = new TraceAPI2(); + } + return this._instance; + }; + TraceAPI2.prototype.setGlobalTracerProvider = function(provider) { + var success = registerGlobal(API_NAME4, this._proxyTracerProvider, DiagAPI.instance()); + if (success) { + this._proxyTracerProvider.setDelegate(provider); + } + return success; + }; + TraceAPI2.prototype.getTracerProvider = function() { + return getGlobal(API_NAME4) || this._proxyTracerProvider; + }; + TraceAPI2.prototype.getTracer = function(name, version2) { + return this.getTracerProvider().getTracer(name, version2); + }; + TraceAPI2.prototype.disable = function() { + unregisterGlobal(API_NAME4, DiagAPI.instance()); + this._proxyTracerProvider = new ProxyTracerProvider(); + }; + return TraceAPI2; + }() +); + +// node_modules/@opentelemetry/api/build/esm/trace-api.js +var trace = TraceAPI.getInstance(); + +// transport/message.ts +import { Type } from "@sinclair/typebox"; +var TransportMessageSchema = (t) => Type.Object({ + id: Type.String(), + from: Type.String(), + to: Type.String(), + seq: Type.Integer(), + ack: Type.Integer(), + serviceName: Type.Optional(Type.String()), + procedureName: Type.Optional(Type.String()), + streamId: Type.String(), + controlFlags: Type.Integer(), + tracing: Type.Optional( + Type.Object({ + traceparent: Type.String(), + tracestate: Type.String() + }) + ), + payload: t +}); +var ControlMessageAckSchema = Type.Object({ + type: Type.Literal("ACK") +}); +var ControlMessageCloseSchema = Type.Object({ + type: Type.Literal("CLOSE") +}); +var currentProtocolVersion = "v2.0"; +var acceptedProtocolVersions = ["v1.1", currentProtocolVersion]; +function isAcceptedProtocolVersion(version2) { + return acceptedProtocolVersions.includes(version2); +} +var ControlMessageHandshakeRequestSchema = Type.Object({ + type: Type.Literal("HANDSHAKE_REQ"), + 
protocolVersion: Type.String(), + sessionId: Type.String(), + /** + * Specifies what the server's expected session state (from the pov of the client). This can be + * used by the server to know whether this is a new or a reestablished connection, and whether it + * is compatible with what it already has. + */ + expectedSessionState: Type.Object({ + // what the client expects the server to send next + nextExpectedSeq: Type.Integer(), + nextSentSeq: Type.Integer() + }), + metadata: Type.Optional(Type.Unknown()) +}); +var HandshakeErrorRetriableResponseCodes = Type.Union([ + Type.Literal("SESSION_STATE_MISMATCH") +]); +var HandshakeErrorCustomHandlerFatalResponseCodes = Type.Union([ + // The custom validation handler rejected the handler because the client is unsupported. + Type.Literal("REJECTED_UNSUPPORTED_CLIENT"), + // The custom validation handler rejected the handshake. + Type.Literal("REJECTED_BY_CUSTOM_HANDLER") +]); +var HandshakeErrorFatalResponseCodes = Type.Union([ + HandshakeErrorCustomHandlerFatalResponseCodes, + // The ciient sent a handshake that doesn't comply with the extended handshake metadata. + Type.Literal("MALFORMED_HANDSHAKE_META"), + // The ciient sent a handshake that doesn't comply with ControlMessageHandshakeRequestSchema. + Type.Literal("MALFORMED_HANDSHAKE"), + // The client's protocol version does not match the server's. 
+ Type.Literal("PROTOCOL_VERSION_MISMATCH") +]); +var HandshakeErrorResponseCodes = Type.Union([ + HandshakeErrorRetriableResponseCodes, + HandshakeErrorFatalResponseCodes +]); +var ControlMessageHandshakeResponseSchema = Type.Object({ + type: Type.Literal("HANDSHAKE_RESP"), + status: Type.Union([ + Type.Object({ + ok: Type.Literal(true), + sessionId: Type.String() + }), + Type.Object({ + ok: Type.Literal(false), + reason: Type.String(), + code: HandshakeErrorResponseCodes + }) + ]) +}); +var ControlMessagePayloadSchema = Type.Union([ + ControlMessageCloseSchema, + ControlMessageAckSchema, + ControlMessageHandshakeRequestSchema, + ControlMessageHandshakeResponseSchema +]); +var OpaqueTransportMessageSchema = TransportMessageSchema( + Type.Unknown() +); +function handshakeResponseMessage({ + from, + to, + status +}) { + return { + id: generateId(), + from, + to, + seq: 0, + ack: 0, + streamId: generateId(), + controlFlags: 0, + payload: { + type: "HANDSHAKE_RESP", + status + } + }; +} +function closeStreamMessage(streamId) { + return { + streamId, + controlFlags: 8 /* StreamClosedBit */, + payload: { + type: "CLOSE" + } + }; +} +function cancelMessage(streamId, payload) { + return { + streamId, + controlFlags: 4 /* StreamCancelBit */, + payload + }; +} +function isAck(controlFlag) { + return (controlFlag & 1 /* AckBit */) === 1 /* AckBit */; +} +function isStreamOpen(controlFlag) { + return ( + /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ + (controlFlag & 2 /* StreamOpenBit */) === 2 /* StreamOpenBit */ + ); +} +function isStreamClose(controlFlag) { + return ( + /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ + (controlFlag & 8 /* StreamClosedBit */) === 8 /* StreamClosedBit */ + ); +} +function isStreamCancel(controlFlag) { + return ( + /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ + (controlFlag & 4 /* StreamCancelBit */) === 4 /* StreamCancelBit */ + ); +} + +// codec/json.ts 
+var encoder = new TextEncoder(); +var decoder = new TextDecoder(); +function uint8ArrayToBase64(uint8Array) { + let binary = ""; + uint8Array.forEach((byte) => { + binary += String.fromCharCode(byte); + }); + return btoa(binary); +} +function base64ToUint8Array(base64) { + const binaryString = atob(base64); + const uint8Array = new Uint8Array(binaryString.length); + for (let i = 0; i < binaryString.length; i++) { + uint8Array[i] = binaryString.charCodeAt(i); + } + return uint8Array; +} +var NaiveJsonCodec = { + toBuffer: (obj) => { + return encoder.encode( + JSON.stringify(obj, function replacer(key) { + const val = this[key]; + if (val instanceof Uint8Array) { + return { $t: uint8ArrayToBase64(val) }; + } else if (typeof val === "bigint") { + return { $b: val.toString() }; + } else { + return val; + } + }) + ); + }, + fromBuffer: (buff) => { + const parsed = JSON.parse( + decoder.decode(buff), + function reviver(_key, val) { + if (val?.$t !== void 0) { + return base64ToUint8Array(val.$t); + } else if (val?.$b !== void 0) { + return BigInt(val.$b); + } else { + return val; + } + } + ); + if (typeof parsed !== "object" || parsed === null) { + throw new Error("unpacked msg is not an object"); + } + return parsed; + } +}; + +// transport/options.ts +var defaultTransportOptions = { + heartbeatIntervalMs: 1e3, + heartbeatsUntilDead: 2, + sessionDisconnectGraceMs: 5e3, + connectionTimeoutMs: 2e3, + handshakeTimeoutMs: 1e3, + enableTransparentSessionReconnects: true, + codec: NaiveJsonCodec +}; +var defaultConnectionRetryOptions = { + baseIntervalMs: 150, + maxJitterMs: 200, + maxBackoffMs: 32e3, + attemptBudgetCapacity: 5, + budgetRestoreIntervalMs: 200, + isFatalConnectionError: () => false +}; +var defaultClientTransportOptions = { + ...defaultTransportOptions, + ...defaultConnectionRetryOptions +}; +var defaultServerTransportOptions = { + ...defaultTransportOptions +}; + +// logging/log.ts +var LoggingLevels = { + debug: -1, + info: 0, + warn: 1, + error: 2 +}; +var 
cleanedLogFn = (log) => { + return (msg, metadata) => { + if (metadata && !metadata.telemetry) { + const span = trace.getSpan(context.active()); + if (span) { + metadata.telemetry = { + traceId: span.spanContext().traceId, + spanId: span.spanContext().spanId + }; + } + } + if (!metadata?.transportMessage) { + log(msg, metadata); + return; + } + const { payload, ...rest } = metadata.transportMessage; + metadata.transportMessage = rest; + log(msg, metadata); + }; +}; +var BaseLogger = class { + minLevel; + output; + constructor(output, minLevel = "info") { + this.minLevel = minLevel; + this.output = output; + } + debug(msg, metadata) { + if (LoggingLevels[this.minLevel] <= LoggingLevels.debug) { + this.output(msg, metadata ?? {}, "debug"); + } + } + info(msg, metadata) { + if (LoggingLevels[this.minLevel] <= LoggingLevels.info) { + this.output(msg, metadata ?? {}, "info"); + } + } + warn(msg, metadata) { + if (LoggingLevels[this.minLevel] <= LoggingLevels.warn) { + this.output(msg, metadata ?? {}, "warn"); + } + } + error(msg, metadata) { + if (LoggingLevels[this.minLevel] <= LoggingLevels.error) { + this.output(msg, metadata ?? {}, "error"); + } + } +}; +var createLogProxy = (log) => ({ + debug: cleanedLogFn(log.debug.bind(log)), + info: cleanedLogFn(log.info.bind(log)), + warn: cleanedLogFn(log.warn.bind(log)), + error: cleanedLogFn(log.error.bind(log)) +}); + +// transport/events.ts +var ProtocolError = { + RetriesExceeded: "conn_retry_exceeded", + HandshakeFailed: "handshake_failed", + MessageOrderingViolated: "message_ordering_violated", + InvalidMessage: "invalid_message", + MessageSendFailure: "message_send_failure" +}; +var EventDispatcher = class { + eventListeners = {}; + removeAllListeners() { + this.eventListeners = {}; + } + numberOfListeners(eventType) { + return this.eventListeners[eventType]?.size ?? 
0; + } + addEventListener(eventType, handler) { + if (!this.eventListeners[eventType]) { + this.eventListeners[eventType] = /* @__PURE__ */ new Set(); + } + this.eventListeners[eventType]?.add(handler); + } + removeEventListener(eventType, handler) { + const handlers = this.eventListeners[eventType]; + if (handlers) { + this.eventListeners[eventType]?.delete(handler); + } + } + dispatchEvent(eventType, event) { + const handlers = this.eventListeners[eventType]; + if (handlers) { + const copy = [...handlers]; + for (const handler of copy) { + handler(event); + } + } + } +}; + +// transport/sessionStateMachine/common.ts +var ERR_CONSUMED = `session state has been consumed and is no longer valid`; +var StateMachineState = class { + /* + * Whether this state has been consumed + * and we've moved on to another state + */ + _isConsumed; + /** + * Cleanup this state machine state and mark it as consumed. + * After calling close, it is an error to access any properties on the state. + * You should never need to call this as a consumer. + * + * If you're looking to close the session from the client, + * use `.hardDisconnect` on the client transport. 
+ */ + close() { + this._handleClose(); + } + constructor() { + this._isConsumed = false; + return new Proxy(this, { + get(target, prop) { + if (prop === "_isConsumed" || prop === "id" || prop === "state") { + return Reflect.get(target, prop); + } + if (prop === "_handleStateExit") { + return () => { + target._isConsumed = true; + target._handleStateExit(); + }; + } + if (prop === "_handleClose") { + return () => { + target._isConsumed = true; + target._handleStateExit(); + target._handleClose(); + }; + } + if (target._isConsumed) { + throw new Error( + `${ERR_CONSUMED}: getting ${prop.toString()} on consumed state` + ); + } + return Reflect.get(target, prop); + }, + set(target, prop, value) { + if (target._isConsumed) { + throw new Error( + `${ERR_CONSUMED}: setting ${prop.toString()} on consumed state` + ); + } + return Reflect.set(target, prop, value); + } + }); + } +}; +var CommonSession = class extends StateMachineState { + from; + options; + codec; + tracer; + log; + constructor({ from, options, log, tracer, codec }) { + super(); + this.from = from; + this.options = options; + this.log = log; + this.tracer = tracer; + this.codec = codec; + } +}; +var IdentifiedSession = class extends CommonSession { + id; + telemetry; + to; + protocolVersion; + /** + * Index of the message we will send next (excluding handshake) + */ + seq; + /** + * Last seq we sent over the wire this session (excluding handshake) and retransmissions + */ + seqSent; + /** + * Number of unique messages we've received this session (excluding handshake) + */ + ack; + sendBuffer; + constructor(props) { + const { + id, + to, + seq, + ack, + sendBuffer, + telemetry, + log, + protocolVersion, + seqSent: messagesSent + } = props; + super(props); + this.id = id; + this.to = to; + this.seq = seq; + this.ack = ack; + this.sendBuffer = sendBuffer; + this.telemetry = telemetry; + this.log = log; + this.protocolVersion = protocolVersion; + this.seqSent = messagesSent; + } + get loggingMetadata() { + const 
metadata = { + clientId: this.from, + connectedTo: this.to, + sessionId: this.id + }; + if (this.telemetry.span.isRecording()) { + const spanContext = this.telemetry.span.spanContext(); + metadata.telemetry = { + traceId: spanContext.traceId, + spanId: spanContext.spanId + }; + } + return metadata; + } + constructMsg(partialMsg) { + const msg = { + ...partialMsg, + id: generateId(), + to: this.to, + from: this.from, + seq: this.seq, + ack: this.ack + }; + this.seq++; + return msg; + } + nextSeq() { + return this.sendBuffer.length > 0 ? this.sendBuffer[0].seq : this.seq; + } + send(msg) { + const constructedMsg = this.constructMsg(msg); + this.sendBuffer.push(constructedMsg); + return { + ok: true, + value: constructedMsg.id + }; + } + _handleStateExit() { + } + _handleClose() { + this.sendBuffer.length = 0; + this.telemetry.span.end(); + } +}; +var IdentifiedSessionWithGracePeriod = class extends IdentifiedSession { + graceExpiryTime; + gracePeriodTimeout; + listeners; + constructor(props) { + super(props); + this.listeners = props.listeners; + this.graceExpiryTime = props.graceExpiryTime; + this.gracePeriodTimeout = setTimeout(() => { + this.listeners.onSessionGracePeriodElapsed(); + }, this.graceExpiryTime - Date.now()); + } + _handleStateExit() { + super._handleStateExit(); + if (this.gracePeriodTimeout) { + clearTimeout(this.gracePeriodTimeout); + this.gracePeriodTimeout = void 0; + } + } + _handleClose() { + super._handleClose(); + } +}; +function sendMessage(conn, codec, msg) { + const buff = codec.toBuffer(msg); + if (!buff.ok) { + return buff; + } + const sent = conn.send(buff.value); + if (!sent) { + return { + ok: false, + reason: "failed to send message" + }; + } + return { + ok: true, + value: msg.id + }; +} + +// transport/sessionStateMachine/SessionConnecting.ts +var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { + state = "Connecting" /* Connecting */; + connPromise; + listeners; + connectionTimeout; + constructor(props) { + 
super(props); + this.connPromise = props.connPromise; + this.listeners = props.listeners; + this.connPromise.then( + (conn) => { + if (this._isConsumed) return; + this.listeners.onConnectionEstablished(conn); + }, + (err) => { + if (this._isConsumed) return; + this.listeners.onConnectionFailed(err); + } + ); + this.connectionTimeout = setTimeout(() => { + this.listeners.onConnectionTimeout(); + }, this.options.connectionTimeoutMs); + } + // close a pending connection if it resolves, ignore errors if the promise + // ends up rejected anyways + bestEffortClose() { + const logger = this.log; + const metadata = this.loggingMetadata; + this.connPromise.then((conn) => { + conn.close(); + logger?.info( + "connection eventually resolved but session has transitioned, closed connection", + { + ...metadata, + ...conn.loggingMetadata + } + ); + }).catch(() => { + }); + } + _handleStateExit() { + super._handleStateExit(); + if (this.connectionTimeout) { + clearTimeout(this.connectionTimeout); + this.connectionTimeout = void 0; + } + } + _handleClose() { + super._handleClose(); + this.bestEffortClose(); + } +}; + +// transport/sessionStateMachine/SessionNoConnection.ts +var SessionNoConnection = class extends IdentifiedSessionWithGracePeriod { + state = "NoConnection" /* NoConnection */; + _handleClose() { + super._handleClose(); + } + _handleStateExit() { + super._handleStateExit(); + } +}; + +// router/services.ts +import { Type as Type3, Kind as Kind2 } from "@sinclair/typebox"; + +// router/errors.ts +import { + Kind, + Type as Type2 +} from "@sinclair/typebox"; +var UNCAUGHT_ERROR_CODE = "UNCAUGHT_ERROR"; +var UNEXPECTED_DISCONNECT_CODE = "UNEXPECTED_DISCONNECT"; +var INVALID_REQUEST_CODE = "INVALID_REQUEST"; +var CANCEL_CODE = "CANCEL"; +var ErrResultSchema = (t) => Type2.Object({ + ok: Type2.Literal(false), + payload: t +}); +var ValidationErrorDetails = Type2.Object({ + path: Type2.String(), + message: Type2.String() +}); +var ValidationErrors = 
Type2.Array(ValidationErrorDetails); +function castTypeboxValueErrors(errors) { + const result = []; + for (const error of errors) { + result.push({ + path: error.path, + message: error.message + }); + } + return result; +} +var CancelErrorSchema = Type2.Object({ + code: Type2.Literal(CANCEL_CODE), + message: Type2.String() +}); +var CancelResultSchema = ErrResultSchema(CancelErrorSchema); +var ReaderErrorSchema = Type2.Union([ + Type2.Object({ + code: Type2.Literal(UNCAUGHT_ERROR_CODE), + message: Type2.String() + }), + Type2.Object({ + code: Type2.Literal(UNEXPECTED_DISCONNECT_CODE), + message: Type2.String() + }), + Type2.Object({ + code: Type2.Literal(INVALID_REQUEST_CODE), + message: Type2.String(), + extras: Type2.Optional( + Type2.Object({ + firstValidationErrors: Type2.Array(ValidationErrorDetails), + totalErrors: Type2.Number() + }) + ) + }), + CancelErrorSchema +]); +var ReaderErrorResultSchema = ErrResultSchema(ReaderErrorSchema); +function isUnion(schema) { + return schema[Kind] === "Union"; +} +function flattenErrorType(errType) { + if (!isUnion(errType)) { + return errType; + } + const flattenedTypes = []; + function flatten(type) { + if (isUnion(type)) { + for (const t of type.anyOf) { + flatten(t); + } + } else { + flattenedTypes.push(type); + } + } + flatten(errType); + return Type2.Union(flattenedTypes); +} + +// router/services.ts +function Strict(schema) { + return JSON.parse(JSON.stringify(schema)); +} +function createServiceSchema() { + return class ServiceSchema2 { + /** + * Factory function for creating a fresh state. + */ + initializeState; + /** + * The procedures for this service. + */ + procedures; + /** + * @param config - The configuration for this service. + * @param procedures - The procedures for this service. 
+ */ + constructor(config, procedures) { + this.initializeState = config.initializeState; + this.procedures = procedures; + } + /** + * Creates a {@link ServiceScaffold}, which can be used to define procedures + * that can then be merged into a {@link ServiceSchema}, via the scaffold's + * `finalize` method. + * + * There are two patterns that work well with this method. The first is using + * it to separate the definition of procedures from the definition of the + * service's configuration: + * ```ts + * const MyServiceScaffold = ServiceSchema.scaffold({ + * initializeState: () => ({ count: 0 }), + * }); + * + * const incrementProcedures = MyServiceScaffold.procedures({ + * increment: Procedure.rpc({ + * requestInit: Type.Object({ amount: Type.Number() }), + * responseData: Type.Object({ current: Type.Number() }), + * async handler(ctx, init) { + * ctx.state.count += init.amount; + * return Ok({ current: ctx.state.count }); + * } + * }), + * }) + * + * const MyService = MyServiceScaffold.finalize({ + * ...incrementProcedures, + * // you can also directly define procedures here + * }); + * ``` + * This might be really handy if you have a very large service and you're + * wanting to split it over multiple files. You can define the scaffold + * in one file, and then import that scaffold in other files where you + * define procedures - and then finally import the scaffolds and your + * procedure objects in a final file where you finalize the scaffold into + * a service schema. 
+ * + * The other way is to use it like in a builder pattern: + * ```ts + * const MyService = ServiceSchema + * .scaffold({ initializeState: () => ({ count: 0 }) }) + * .finalize({ + * increment: Procedure.rpc({ + * requestInit: Type.Object({ amount: Type.Number() }), + * responseData: Type.Object({ current: Type.Number() }), + * async handler(ctx, init) { + * ctx.state.count += init.amount; + * return Ok({ current: ctx.state.count }); + * } + * }), + * }) + * ``` + * Depending on your preferences, this may be a more appealing way to define + * a schema versus using the {@link ServiceSchema.define} method. + */ + static scaffold(config) { + return new ServiceScaffold(config); + } + // actual implementation + static define(configOrProcedures, maybeProcedures) { + let config; + let procedures; + if ("initializeState" in configOrProcedures && typeof configOrProcedures.initializeState === "function") { + if (!maybeProcedures) { + throw new Error("Expected procedures to be defined"); + } + config = configOrProcedures; + procedures = maybeProcedures; + } else { + config = { initializeState: () => ({}) }; + procedures = configOrProcedures; + } + return new ServiceSchema2(config, procedures); + } + /** + * Serializes this schema's procedures into a plain object that is JSON compatible. + */ + serialize() { + return { + procedures: Object.fromEntries( + Object.entries(this.procedures).map(([procName, procDef]) => [ + procName, + { + init: Strict(procDef.requestInit), + output: Strict(procDef.responseData), + errors: getSerializedProcErrors(procDef), + // Only add `description` field if the type declares it. + ..."description" in procDef ? { description: procDef.description } : {}, + type: procDef.type, + // Only add the `input` field if the type declares it. + ..."requestData" in procDef ? 
{ + input: Strict(procDef.requestData) + } : {} + } + ]) + ) + }; + } + // TODO remove once clients migrate to v2 + /** + * Same as {@link ServiceSchema.serialize}, but with a format that is compatible with + * protocol v1. This is useful to be able to continue to generate schemas for older + * clients as they are still supported. + */ + serializeV1Compat() { + return { + procedures: Object.fromEntries( + Object.entries(this.procedures).map( + ([procName, procDef]) => { + if (procDef.type === "rpc" || procDef.type === "subscription") { + return [ + procName, + { + // BACKWARDS COMPAT: map init to input for protocolv1 + // this is the only change needed to make it compatible. + input: Strict(procDef.requestInit), + output: Strict(procDef.responseData), + errors: getSerializedProcErrors(procDef), + // Only add `description` field if the type declares it. + ..."description" in procDef ? { description: procDef.description } : {}, + type: procDef.type + } + ]; + } + return [ + procName, + { + init: Strict(procDef.requestInit), + output: Strict(procDef.responseData), + errors: getSerializedProcErrors(procDef), + // Only add `description` field if the type declares it. + ..."description" in procDef ? { description: procDef.description } : {}, + type: procDef.type, + input: Strict(procDef.requestData) + } + ]; + } + ) + ) + }; + } + /** + * Instantiates this schema into a {@link Service} object. + * + * You probably don't need this, usually the River server will handle this + * for you. 
+ */ + instantiate(extendedContext) { + const state = this.initializeState(extendedContext); + const dispose = async () => { + await state[Symbol.asyncDispose]?.(); + state[Symbol.dispose]?.(); + }; + return Object.freeze({ + state, + procedures: this.procedures, + [Symbol.asyncDispose]: dispose + }); + } + }; +} +function getSerializedProcErrors(procDef) { + if (!("responseError" in procDef) || procDef.responseError[Kind2] === "Never") { + return Strict(ReaderErrorSchema); + } + const withProtocolErrors = flattenErrorType( + Type3.Union([procDef.responseError, ReaderErrorSchema]) + ); + return Strict(withProtocolErrors); +} +var ServiceScaffold = class { + /** + * The configuration for this service. + */ + config; + /** + * @param config - The configuration for this service. + */ + constructor(config) { + this.config = config; + } + /** + * Define procedures for this service. Use the {@link Procedure} constructors + * to create them. This returns the procedures object, which can then be + * passed to {@link ServiceSchema.finalize} to create a {@link ServiceSchema}. + * + * @example + * ``` + * const myProcedures = MyServiceScaffold.procedures({ + * myRPC: Procedure.rpc({ + * // ... + * }), + * }); + * + * const MyService = MyServiceScaffold.finalize({ + * ...myProcedures, + * }); + * ``` + * + * @param procedures - The procedures for this service. + */ + procedures(procedures) { + return procedures; + } + /** + * Finalizes the scaffold into a {@link ServiceSchema}. This is where you + * provide the service's procedures and get a {@link ServiceSchema} in return. + * + * You can directly define procedures here, or you can define them separately + * with the {@link ServiceScaffold.procedures} method, and then pass them here. + * + * @example + * ``` + * const MyService = MyServiceScaffold.finalize({ + * myRPC: Procedure.rpc({ + * // ... + * }), + * // e.g. 
from the procedures method + * ...myOtherProcedures, + * }); + * ``` + */ + finalize(procedures) { + return createServiceSchema().define( + this.config, + procedures + ); + } +}; + +// router/result.ts +import { Type as Type4 } from "@sinclair/typebox"; +var AnyResultSchema = Type4.Union([ + Type4.Object({ + ok: Type4.Literal(false), + payload: Type4.Object({ + code: Type4.String(), + message: Type4.String(), + extras: Type4.Optional(Type4.Unknown()) + }) + }), + Type4.Object({ + ok: Type4.Literal(true), + payload: Type4.Unknown() + }) +]); +function Ok(payload) { + return { + ok: true, + payload + }; +} +function Err(error) { + return { + ok: false, + payload: error + }; +} + +// router/streams.ts +var ReadableBrokenError = { + code: "READABLE_BROKEN", + message: "Readable was broken before it is fully consumed" +}; +function createPromiseWithResolvers() { + let resolve; + let reject; + const promise = new Promise((res, rej) => { + resolve = res; + reject = rej; + }); + return { + promise, + // @ts-expect-error promise callbacks are sync + resolve, + // @ts-expect-error promise callbacks are sync + reject + }; +} +var ReadableImpl = class { + /** + * Whether the {@link Readable} is closed. + * + * Closed {@link Readable}s are done receiving values, but that doesn't affect + * any other aspect of the {@link Readable} such as it's consumability. + */ + closed = false; + /** + * Whether the {@link Readable} is locked. + * + * @see {@link Readable}'s typedoc to understand locking + */ + locked = false; + /** + * Whether {@link break} was called. + * + * @see {@link break} for more information + */ + broken = false; + /** + * This flag allows us to avoid emitting a {@link ReadableBrokenError} after {@link break} was called + * in cases where the {@link queue} is fully consumed and {@link ReadableImpl} is {@link closed}. This is just an + * ergonomic feature to avoid emitting an error in our iteration when we don't have to. 
+ */ + brokenWithValuesLeftToRead = false; + /** + * A list of values that have been pushed to the {@link ReadableImpl} but not yet emitted to the user. + */ + queue = []; + /** + * Used by methods in the class to signal to the iterator that it + * should check for the next value. + */ + next = null; + /** + * Consumes the {@link Readable} and returns an {@link AsyncIterator} that can be used + * to iterate over the values in the {@link Readable}. + */ + [Symbol.asyncIterator]() { + if (this.locked) { + throw new TypeError("Readable is already locked"); + } + this.locked = true; + let didSignalBreak = false; + return { + next: async () => { + if (didSignalBreak) { + return { + done: true, + value: void 0 + }; + } + while (this.queue.length === 0) { + if (this.closed && !this.brokenWithValuesLeftToRead) { + return { + done: true, + value: void 0 + }; + } + if (this.broken) { + didSignalBreak = true; + return { + done: false, + value: Err(ReadableBrokenError) + }; + } + if (!this.next) { + this.next = createPromiseWithResolvers(); + } + await this.next.promise; + this.next = null; + } + const value = this.queue.shift(); + return { done: false, value }; + }, + return: async () => { + this.break(); + return { done: true, value: void 0 }; + } + }; + } + /** + * Collects all the values from the {@link Readable} into an array. + * + * @see {@link Readable}'s typedoc for more information + */ + async collect() { + const array = []; + for await (const value of this) { + array.push(value); + } + return array; + } + /** + * Breaks the {@link Readable} and signals an error to any iterators waiting for the next value. + * + * @see {@link Readable}'s typedoc for more information + */ + break() { + if (this.broken) { + return; + } + this.locked = true; + this.broken = true; + this.brokenWithValuesLeftToRead = this.queue.length > 0; + this.queue.length = 0; + this.next?.resolve(); + } + /** + * Whether the {@link Readable} is readable. 
+ * + * @see {@link Readable}'s typedoc for more information + */ + isReadable() { + return !this.locked && !this.broken; + } + /** + * Pushes a value to be read. + */ + _pushValue(value) { + if (this.broken) { + return; + } + if (this.closed) { + throw new Error("Cannot push to closed Readable"); + } + this.queue.push(value); + this.next?.resolve(); + } + /** + * Triggers the close of the {@link Readable}. Make sure to push all remaining + * values before calling this method. + */ + _triggerClose() { + if (this.closed) { + throw new Error("Unexpected closing multiple times"); + } + this.closed = true; + this.next?.resolve(); + } + /** + * @internal meant for use within river, not exposed as a public API + */ + _hasValuesInQueue() { + return this.queue.length > 0; + } + /** + * Whether the {@link Readable} is closed. + */ + isClosed() { + return this.closed; + } +}; +var WritableImpl = class { + /** + * Passed via constructor to pass on calls to {@link write} + */ + writeCb; + /** + * Passed via constructor to pass on calls to {@link close} + */ + closeCb; + /** + * Whether {@link close} was called, and {@link Writable} is not writable anymore. 
+ */ + closed = false; + constructor(callbacks) { + this.writeCb = callbacks.writeCb; + this.closeCb = callbacks.closeCb; + } + write(value) { + if (this.closed) { + throw new Error("Cannot write to closed Writable"); + } + this.writeCb(value); + } + isWritable() { + return !this.closed; + } + close(value) { + if (this.closed) { + return; + } + if (value !== void 0) { + this.writeCb(value); + } + this.closed = true; + this.writeCb = () => void 0; + this.closeCb(); + this.closeCb = () => void 0; + } + /** + * @internal meant for use within river, not exposed as a public API + */ + isClosed() { + return this.closed; + } +}; + +// router/procedures.ts +import { Type as Type5 } from "@sinclair/typebox"; +function rpc({ + requestInit, + responseData, + responseError = Type5.Never(), + description, + handler +}) { + return { + ...description ? { description } : {}, + type: "rpc", + requestInit, + responseData, + responseError, + handler + }; +} +function upload({ + requestInit, + requestData, + responseData, + responseError = Type5.Never(), + description, + handler +}) { + return { + type: "upload", + ...description ? { description } : {}, + requestInit, + requestData, + responseData, + responseError, + handler + }; +} +function subscription({ + requestInit, + responseData, + responseError = Type5.Never(), + description, + handler +}) { + return { + type: "subscription", + ...description ? { description } : {}, + requestInit, + responseData, + responseError, + handler + }; +} +function stream({ + requestInit, + requestData, + responseData, + responseError = Type5.Never(), + description, + handler +}) { + return { + type: "stream", + ...description ? 
{ description } : {}, + requestInit, + requestData, + responseData, + responseError, + handler + }; +} + /* Public constructors for the four River procedure kinds. */ +var Procedure = { + rpc, + upload, + subscription, + stream +}; + +// router/server.ts +import { Value } from "@sinclair/typebox/value"; + +// transport/stringifyError.ts — coerce unknown thrown values to a log-friendly string +function coerceErrorString(err) { + if (err instanceof Error) { + return err.message || "unknown reason"; + } + return `[coerced to error] ${String(err)}`; +} + +// router/server.ts — RiverServer: routes incoming transport messages to per-stream procedure handlers +var RiverServer = class { + transport; + contextMap; + log; + middlewares; + /** + * We create tombstones for streams cancelled by the server + * so that we don't hit errors when the client has inflight + * requests it sent before it saw the cancel. + * We track cancelled streams for every client separately, so + * that bad clients don't affect good clients. + */ + serverCancelledStreams; + maxCancelledStreamTombstonesPerSession; + streams; + services; + unregisterTransportListeners; + constructor(transport, services2, handshakeOptions, extendedContext, maxCancelledStreamTombstonesPerSession = 200, middlewares = []) { + const instances = {}; + this.middlewares = middlewares; + this.services = instances; + this.contextMap = /* @__PURE__ */ new Map(); + extendedContext = extendedContext ??
{}; + for (const [name, service] of Object.entries(services2)) { + const instance = service.instantiate(extendedContext); + instances[name] = instance; + this.contextMap.set(instance, { + ...extendedContext, + state: instance.state + }); + } + if (handshakeOptions) { + transport.extendHandshake(handshakeOptions); + } + this.transport = transport; + this.streams = /* @__PURE__ */ new Map(); + this.serverCancelledStreams = /* @__PURE__ */ new Map(); + this.maxCancelledStreamTombstonesPerSession = maxCancelledStreamTombstonesPerSession; + this.log = transport.log; + const handleCreatingNewStreams = (message) => { + if (message.to !== this.transport.clientId) { + this.log?.info( + `got msg with destination that isn't this server, ignoring`, + { + clientId: this.transport.clientId, + transportMessage: message + } + ); + return; + } + const streamId = message.streamId; + const stream2 = this.streams.get(streamId); + if (stream2) { + stream2.handleMsg(message); + return; + } + if (this.serverCancelledStreams.get(message.from)?.has(streamId)) { + return; + } + const newStreamProps = this.validateNewProcStream(message); + if (!newStreamProps) { + return; + } + createHandlerSpan( + transport.tracer, + newStreamProps.initialSession, + newStreamProps.procedure.type, + newStreamProps.serviceName, + newStreamProps.procedureName, + newStreamProps.streamId, + newStreamProps.tracingCtx, + (span) => { + this.createNewProcStream(span, newStreamProps); + } + ); + }; + const handleSessionStatus = (evt) => { + if (evt.status !== "closing") return; + const disconnectedClientId = evt.session.to; + this.log?.info( + `got session disconnect from ${disconnectedClientId}, cleaning up streams`, + evt.session.loggingMetadata + ); + for (const stream2 of this.streams.values()) { + if (stream2.from === disconnectedClientId) { + stream2.handleSessionDisconnect(); + } + } + this.serverCancelledStreams.delete(disconnectedClientId); + }; + const handleTransportStatus = (evt) => { + if (evt.status !== 
"closed") return; + this.unregisterTransportListeners(); + }; + this.unregisterTransportListeners = () => { + this.transport.removeEventListener("message", handleCreatingNewStreams); + this.transport.removeEventListener("sessionStatus", handleSessionStatus); + this.transport.removeEventListener( + "transportStatus", + handleTransportStatus + ); + }; + this.transport.addEventListener("message", handleCreatingNewStreams); + this.transport.addEventListener("sessionStatus", handleSessionStatus); + this.transport.addEventListener("transportStatus", handleTransportStatus); + } + createNewProcStream(span, props) { + const { + streamId, + initialSession, + procedureName, + serviceName, + procedure, + sessionMetadata, + serviceContext, + initPayload, + procClosesWithInit, + passInitAsDataForBackwardsCompat + } = props; + const { + to: from, + loggingMetadata, + protocolVersion, + id: sessionId + } = initialSession; + loggingMetadata.telemetry = { + traceId: span.spanContext().traceId, + spanId: span.spanContext().spanId + }; + let cleanClose = true; + const onMessage = (msg) => { + if (msg.from !== from) { + this.log?.error("got stream message from unexpected client", { + ...loggingMetadata, + transportMessage: msg, + tags: ["invariant-violation"] + }); + return; + } + if (isStreamCancelBackwardsCompat(msg.controlFlags, protocolVersion)) { + let cancelResult; + if (Value.Check(CancelResultSchema, msg.payload)) { + cancelResult = msg.payload; + } else { + cancelResult = Err({ + code: CANCEL_CODE, + message: "stream cancelled, client sent invalid payload" + }); + this.log?.warn("got stream cancel without a valid protocol error", { + ...loggingMetadata, + transportMessage: msg, + validationErrors: [ + ...Value.Errors(CancelResultSchema, msg.payload) + ], + tags: ["invalid-request"] + }); + } + if (!reqReadable.isClosed()) { + reqReadable._pushValue(cancelResult); + closeReadable(); + } + resWritable.close(); + return; + } + if (reqReadable.isClosed()) { + 
this.log?.warn("received message after request stream is closed", { + ...loggingMetadata, + transportMessage: msg, + tags: ["invalid-request"] + }); + onServerCancel({ + code: INVALID_REQUEST_CODE, + message: "received message after request stream is closed" + }); + return; + } + if ("requestData" in procedure && Value.Check(procedure.requestData, msg.payload)) { + reqReadable._pushValue(Ok(msg.payload)); + if (isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion)) { + closeReadable(); + } + return; + } + if (Value.Check(ControlMessagePayloadSchema, msg.payload) && isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion)) { + closeReadable(); + return; + } + let validationErrors; + let errMessage; + if ("requestData" in procedure) { + errMessage = "message in requestData position did not match schema"; + validationErrors = castTypeboxValueErrors( + Value.Errors(procedure.requestData, msg.payload) + ); + } else { + validationErrors = castTypeboxValueErrors( + Value.Errors(ControlMessagePayloadSchema, msg.payload) + ); + errMessage = "message in control payload position did not match schema"; + } + this.log?.warn(errMessage, { + ...loggingMetadata, + transportMessage: msg, + validationErrors: validationErrors.map((error) => ({ + path: error.path, + message: error.message + })), + tags: ["invalid-request"] + }); + onServerCancel({ + code: INVALID_REQUEST_CODE, + message: errMessage, + extras: { + totalErrors: validationErrors.length, + firstValidationErrors: validationErrors.slice(0, 5) + } + }); + }; + const finishedController = new AbortController(); + const procStream = { + from, + streamId, + procedureName, + serviceName, + sessionMetadata, + procedure, + handleMsg: onMessage, + handleSessionDisconnect: () => { + cleanClose = false; + const errPayload = { + code: UNEXPECTED_DISCONNECT_CODE, + message: "client unexpectedly disconnected" + }; + if (!reqReadable.isClosed()) { + reqReadable._pushValue(Err(errPayload)); + closeReadable(); + } + 
resWritable.close(); + } + }; + const sessionScopedSend = this.transport.getSessionBoundSendFn( + from, + sessionId + ); + const cancelStream = (streamId2, payload) => { + this.cancelStream(from, sessionScopedSend, streamId2, payload); + }; + const onServerCancel = (e) => { + recordRiverError(span, e); + if (reqReadable.isClosed() && resWritable.isClosed()) { + return; + } + cleanClose = false; + const result = Err(e); + if (!reqReadable.isClosed()) { + reqReadable._pushValue(result); + closeReadable(); + } + resWritable.close(); + cancelStream(streamId, result); + }; + const cleanup = () => { + finishedController.abort(); + this.streams.delete(streamId); + }; + const procClosesWithResponse = procedure.type === "rpc" || procedure.type === "upload"; + const reqReadable = new ReadableImpl(); + const closeReadable = () => { + reqReadable._triggerClose(); + if (protocolVersion === "v1.1") { + if (!procClosesWithResponse && !resWritable.isClosed()) { + resWritable.close(); + } + } + if (resWritable.isClosed()) { + cleanup(); + } + }; + if (passInitAsDataForBackwardsCompat) { + reqReadable._pushValue(Ok(initPayload)); + } + const resWritable = new WritableImpl({ + writeCb: (response) => { + if (!response.ok) { + recordRiverError(span, response.payload); + } + sessionScopedSend({ + streamId, + controlFlags: procClosesWithResponse ? 
getStreamCloseBackwardsCompat(protocolVersion) : 0, + payload: response + }); + if (procClosesWithResponse) { + resWritable.close(); + } + }, + // close callback + closeCb: () => { + if (!procClosesWithResponse && cleanClose) { + const message = closeStreamMessage(streamId); + message.controlFlags = getStreamCloseBackwardsCompat(protocolVersion); + sessionScopedSend(message); + } + if (protocolVersion === "v1.1") { + if (!reqReadable.isClosed()) { + closeReadable(); + } + } + if (reqReadable.isClosed()) { + cleanup(); + } + } + }); + const onHandlerError = (err, span2) => { + const errorMsg = coerceErrorString(err); + span2.recordException(err instanceof Error ? err : new Error(errorMsg)); + this.log?.error( + `${serviceName}.${procedureName} handler threw an uncaught error`, + { + ...loggingMetadata, + transportMessage: { + procedureName, + serviceName + }, + extras: { + error: errorMsg, + originalException: err + }, + tags: ["uncaught-handler-error"] + } + ); + onServerCancel({ + code: UNCAUGHT_ERROR_CODE, + message: errorMsg + }); + }; + if (procClosesWithInit) { + closeReadable(); + } + const handlerContextWithSpan = { + ...serviceContext, + from, + sessionId, + metadata: sessionMetadata, + span, + cancel: (message) => { + const errRes = { + code: CANCEL_CODE, + message: message ?? 
"cancelled by server procedure handler" + }; + onServerCancel(errRes); + return Err(errRes); + }, + signal: finishedController.signal + }; + const middlewareContext = { + ...serviceContext, + sessionId, + from, + metadata: sessionMetadata, + span, + signal: finishedController.signal, + streamId, + procedureName, + serviceName + }; + const runProcedureHandler = async () => { + switch (procedure.type) { + case "rpc": + try { + const responsePayload = await procedure.handler({ + ctx: handlerContextWithSpan, + reqInit: initPayload + }); + if (resWritable.isClosed()) { + return; + } + resWritable.write(responsePayload); + } catch (err) { + onHandlerError(err, span); + } finally { + span.end(); + } + break; + case "stream": + try { + await procedure.handler({ + ctx: handlerContextWithSpan, + reqInit: initPayload, + reqReadable, + resWritable + }); + } catch (err) { + onHandlerError(err, span); + } finally { + span.end(); + } + break; + case "subscription": + try { + await procedure.handler({ + ctx: handlerContextWithSpan, + reqInit: initPayload, + resWritable + }); + } catch (err) { + onHandlerError(err, span); + } finally { + span.end(); + } + break; + case "upload": + try { + const responsePayload = await procedure.handler({ + ctx: handlerContextWithSpan, + reqInit: initPayload, + reqReadable + }); + if (resWritable.isClosed()) { + return; + } + resWritable.write(responsePayload); + } catch (err) { + onHandlerError(err, span); + } finally { + span.end(); + } + break; + } + }; + this.middlewares.reduceRight( + (next, middleware) => { + return () => { + middleware({ + ctx: middlewareContext, + reqInit: initPayload, + next + }); + }; + }, + () => { + void runProcedureHandler(); + } + )(); + if (!finishedController.signal.aborted) { + this.streams.set(streamId, procStream); + } + } + getContext(service, serviceName) { + const context2 = this.contextMap.get(service); + if (!context2) { + const err = `no context found for ${serviceName}`; + this.log?.error(err, { + clientId: 
  /**
   * Gatekeeper for every incoming stream-open message.
   *
   * Resolves everything a new procedure stream needs — the owning session,
   * its handshake metadata, the target service/procedure, and a validated
   * init payload — or returns `null` when the request cannot proceed.
   *
   * Failure handling is two-tiered:
   *  - client-caused problems (bad flags, unknown service/procedure, schema
   *    mismatch) are logged with the "invalid-request" tag AND the offending
   *    stream is cancelled back to the client with INVALID_REQUEST;
   *  - internal inconsistencies (missing session / metadata, bad procedure
   *    type) are logged with "invariant-violation" and, where a session
   *    exists, cancelled with UNCAUGHT_ERROR.
   *
   * @param initMessage the transport message that opened the stream
   * @returns the assembled stream context, or `null` if rejected
   */
  validateNewProcStream(initMessage) {
    const session = this.transport.sessions.get(initMessage.from);
    if (!session) {
      // No session means the transport and server views have diverged;
      // there is nowhere to send a cancel, so we can only log.
      this.log?.error(`couldn't find session for ${initMessage.from}`, {
        clientId: this.transport.clientId,
        transportMessage: initMessage,
        tags: ["invariant-violation"]
      });
      return null;
    }
    const sessionScopedSend = this.transport.getSessionBoundSendFn(
      initMessage.from,
      session.id
    );
    // Bind cancellation to this sender so tombstone bookkeeping in
    // cancelStream is attributed to the right peer.
    const cancelStream = (streamId, payload) => {
      this.cancelStream(initMessage.from, sessionScopedSend, streamId, payload);
    };
    const sessionMetadata = this.transport.sessionHandshakeMetadata.get(
      session.to
    );
    if (!sessionMetadata) {
      const errMessage = `session doesn't have handshake metadata`;
      this.log?.error(errMessage, {
        ...session.loggingMetadata,
        tags: ["invariant-violation"]
      });
      cancelStream(
        initMessage.streamId,
        Err({
          code: UNCAUGHT_ERROR_CODE,
          message: errMessage
        })
      );
      return null;
    }
    // Only messages with the stream-open bit may create streams.
    if (!isStreamOpen(initMessage.controlFlags)) {
      const errMessage = `can't create a new procedure stream from a message that doesn't have the stream open bit set`;
      this.log?.warn(errMessage, {
        ...session.loggingMetadata,
        clientId: this.transport.clientId,
        transportMessage: initMessage,
        tags: ["invalid-request"]
      });
      cancelStream(
        initMessage.streamId,
        Err({
          code: INVALID_REQUEST_CODE,
          message: errMessage
        })
      );
      return null;
    }
    if (!initMessage.serviceName) {
      const errMessage = `missing service name in stream open message`;
      this.log?.warn(errMessage, {
        ...session.loggingMetadata,
        transportMessage: initMessage,
        tags: ["invalid-request"]
      });
      cancelStream(
        initMessage.streamId,
        Err({
          code: INVALID_REQUEST_CODE,
          message: errMessage
        })
      );
      return null;
    }
    if (!initMessage.procedureName) {
      const errMessage = `missing procedure name in stream open message`;
      this.log?.warn(errMessage, {
        ...session.loggingMetadata,
        transportMessage: initMessage,
        tags: ["invalid-request"]
      });
      cancelStream(
        initMessage.streamId,
        Err({
          code: INVALID_REQUEST_CODE,
          message: errMessage
        })
      );
      return null;
    }
    if (!(initMessage.serviceName in this.services)) {
      const errMessage = `couldn't find service ${initMessage.serviceName}`;
      this.log?.warn(errMessage, {
        ...session.loggingMetadata,
        clientId: this.transport.clientId,
        transportMessage: initMessage,
        tags: ["invalid-request"]
      });
      cancelStream(
        initMessage.streamId,
        Err({
          code: INVALID_REQUEST_CODE,
          message: errMessage
        })
      );
      return null;
    }
    const service = this.services[initMessage.serviceName];
    if (!(initMessage.procedureName in service.procedures)) {
      const errMessage = `couldn't find a matching procedure for ${initMessage.serviceName}.${initMessage.procedureName}`;
      this.log?.warn(errMessage, {
        ...session.loggingMetadata,
        transportMessage: initMessage,
        tags: ["invalid-request"]
      });
      cancelStream(
        initMessage.streamId,
        Err({
          code: INVALID_REQUEST_CODE,
          message: errMessage
        })
      );
      return null;
    }
    const serviceContext = this.getContext(service, initMessage.serviceName);
    const procedure = service.procedures[initMessage.procedureName];
    // Defensive check: the service definition itself is malformed if the
    // procedure type is not one of the four known kinds. No cancel is sent
    // here — NOTE(review): this leaves the client's stream dangling; confirm
    // whether that is intentional.
    if (!["rpc", "upload", "stream", "subscription"].includes(procedure.type)) {
      this.log?.error(
        `got request for invalid procedure type ${procedure.type} at ${initMessage.serviceName}.${initMessage.procedureName}`,
        {
          ...session.loggingMetadata,
          transportMessage: initMessage,
          tags: ["invariant-violation"]
        }
      );
      return null;
    }
    // v1.1 backwards compat: old clients could open upload/stream procedures
    // with a payload shaped like a *data* message (init schema must accept
    // the empty object). In that case the payload is later replayed as the
    // first data message instead of being treated as init.
    let passInitAsDataForBackwardsCompat = false;
    if (session.protocolVersion === "v1.1" && (procedure.type === "upload" || procedure.type === "stream") && Value.Check(procedure.requestData, initMessage.payload) && Value.Check(procedure.requestInit, {})) {
      passInitAsDataForBackwardsCompat = true;
    } else if (!Value.Check(procedure.requestInit, initMessage.payload)) {
      const errMessage = `procedure init failed validation`;
      this.log?.warn(errMessage, {
        ...session.loggingMetadata,
        clientId: this.transport.clientId,
        transportMessage: initMessage,
        tags: ["invalid-request"]
      });
      cancelStream(
        initMessage.streamId,
        Err({
          code: INVALID_REQUEST_CODE,
          message: errMessage
        })
      );
      return null;
    }
    // All checks passed: hand back everything needed to construct the stream.
    return {
      initialSession: session,
      streamId: initMessage.streamId,
      procedureName: initMessage.procedureName,
      serviceName: initMessage.serviceName,
      tracingCtx: initMessage.tracing,
      initPayload: initMessage.payload,
      sessionMetadata,
      procedure,
      serviceContext,
      // Whether the opening message also closed the stream (e.g. rpc-style
      // single-shot requests), translated per protocol version.
      procClosesWithInit: isStreamCloseBackwardsCompat(
        initMessage.controlFlags,
        session.protocolVersion
      ),
      passInitAsDataForBackwardsCompat
    };
  }
var LRUSet = class {
  items;
  maxItems;
  /**
   * A fixed-capacity set with oldest-first eviction, used to remember
   * recently cancelled stream ids (tombstones) without unbounded growth.
   * @param maxItems maximum number of entries retained before eviction
   */
  constructor(maxItems) {
    this.maxItems = maxItems;
    this.items = new Set();
  }
  /**
   * Insert `item`. Re-adding an existing item refreshes its recency.
   * When the set is full and `item` is new, the oldest entry is dropped.
   */
  add(item) {
    // Set.delete returns true when the item was present; removing and
    // re-adding moves it to the most-recent position.
    const wasPresent = this.items.delete(item);
    if (!wasPresent && this.items.size >= this.maxItems) {
      // Sets iterate in insertion order, so the first value is the oldest.
      const oldest = this.items.values().next();
      if (!oldest.done) {
        this.items.delete(oldest.value);
      }
    }
    this.items.add(item);
  }
  /** Whether `item` is currently tracked. */
  has(item) {
    return this.items.has(item);
  }
};
/**
 * Protocol v1.1 had no dedicated cancel bit, so a frame from a v1.1 peer can
 * never be read as an explicit cancel.
 */
function isStreamCancelBackwardsCompat(controlFlags, protocolVersion) {
  return protocolVersion === "v1.1" ? false : isStreamCancel(controlFlags);
}
/**
 * Under v1.1 the cancel bit doubled as the close signal; v2 peers use the
 * dedicated close bit instead.
 */
function isStreamCloseBackwardsCompat(controlFlags, protocolVersion) {
  return protocolVersion === "v1.1"
    ? isStreamCancel(controlFlags)
    : isStreamClose(controlFlags);
}
/**
 * Control-flag bit to set when closing a stream toward a peer: v1.1 peers
 * expect the cancel bit (4), newer peers the close bit (8).
 */
function getStreamCloseBackwardsCompat(protocolVersion) {
  return protocolVersion === "v1.1" ? 4 /* StreamCancelBit */ : 8 /* StreamClosedBit */;
}
/**
 * Mark `span` as failed and attach the River error payload as span
 * attributes so traces can be filtered by error code/message.
 */
function recordRiverError(span, error) {
  const { code, message } = error;
  span.setStatus({
    code: SpanStatusCode.ERROR,
    message
  });
  span.setAttributes({
    "river.error_code": code,
    "river.error_message": message
  });
}
/** Fetch the shared "river" tracer, tagged with the package version. */
function getTracer() {
  return trace.getTracer("river", version);
}
// Session state for a fully established connection: owns the live
// connection, performs seq/ack bookkeeping for at-least-once delivery,
// and drives/answers heartbeats.
var SessionConnected = class extends IdentifiedSession {
  state = "Connected" /* Connected */;
  conn;
  listeners;
  // interval handle, set only when this side actively sends heartbeats
  heartbeatHandle;
  // timeout that closes the connection after too long without inbound traffic
  heartbeatMissTimeout;
  isActivelyHeartbeating = false;
  /**
   * Process the peer's (ack, seq) pair from an inbound message: drop
   * acknowledged entries from the retransmit buffer, advance our expected
   * next seq, and reset the inactivity timer (any inbound message counts
   * as liveness).
   */
  updateBookkeeping(ack, seq) {
    // keep only messages the peer has NOT acknowledged yet
    this.sendBuffer = this.sendBuffer.filter((unacked) => unacked.seq >= ack);
    this.ack = seq + 1;
    if (this.heartbeatMissTimeout) {
      clearTimeout(this.heartbeatMissTimeout);
    }
    this.startMissingHeartbeatTimeout();
  }
  /**
   * Invariant guard: a message about to go on the wire must be at most one
   * ahead of the last seq actually sent; anything else means the send path
   * skipped a message. Throws on violation.
   */
  assertSendOrdering(constructedMsg) {
    if (constructedMsg.seq > this.seqSent + 1) {
      const msg = `invariant violation: would have sent out of order msg (seq: ${constructedMsg.seq}, expected: ${this.seqSent} + 1)`;
      this.log?.error(msg, {
        ...this.loggingMetadata,
        transportMessage: constructedMsg,
        tags: ["invariant-violation"]
      });
      throw new Error(msg);
    }
  }
  /**
   * Construct, buffer, and transmit a message. The message is pushed to the
   * send buffer BEFORE transmission so it survives for retransmit if the
   * send fails or the connection drops; seqSent only advances on success.
   */
  send(msg) {
    const constructedMsg = this.constructMsg(msg);
    this.assertSendOrdering(constructedMsg);
    this.sendBuffer.push(constructedMsg);
    const res = sendMessage(this.conn, this.codec, constructedMsg);
    if (!res.ok) {
      this.listeners.onMessageSendFailure(constructedMsg, res.reason);
      return res;
    }
    this.seqSent = constructedMsg.seq;
    return res;
  }
  constructor(props) {
    super(props);
    this.conn = props.conn;
    this.listeners = props.listeners;
    // wire connection events straight through to the state-machine listeners
    this.conn.setDataListener(this.onMessageData);
    this.conn.setCloseListener(this.listeners.onConnectionClosed);
    this.conn.setErrorListener(this.listeners.onConnectionErrored);
  }
  /**
   * Retransmit everything still in the send buffer (called after a
   * reconnect). Stops and reports on the first failed send.
   */
  sendBufferedMessages() {
    if (this.sendBuffer.length > 0) {
      this.log?.info(
        `sending ${this.sendBuffer.length} buffered messages, starting at seq ${this.nextSeq()}`,
        this.loggingMetadata
      );
      for (const msg of this.sendBuffer) {
        this.assertSendOrdering(msg);
        const res = sendMessage(this.conn, this.codec, msg);
        if (!res.ok) {
          this.listeners.onMessageSendFailure(msg, res.reason);
          return res;
        }
        this.seqSent = msg.seq;
      }
    }
    return { ok: true, value: void 0 };
  }
  get loggingMetadata() {
    return {
      ...super.loggingMetadata,
      ...this.conn.loggingMetadata
    };
  }
  /**
   * Arm the inactivity timer: if no inbound message arrives within
   * heartbeatsUntilDead * heartbeatIntervalMs, close the connection.
   */
  startMissingHeartbeatTimeout() {
    const maxMisses = this.options.heartbeatsUntilDead;
    const missDuration = maxMisses * this.options.heartbeatIntervalMs;
    this.heartbeatMissTimeout = setTimeout(() => {
      this.log?.info(
        `closing connection to ${this.to} due to inactivity (missed ${maxMisses} heartbeats which is ${missDuration}ms)`,
        this.loggingMetadata
      );
      this.telemetry.span.addEvent(
        "closing connection due to missing heartbeat"
      );
      this.conn.close();
    }, missDuration);
  }
  // Begin sending heartbeats on an interval. Presumably only one side of a
  // session heartbeats actively while the other echoes — TODO confirm
  // against the protocol spec.
  startActiveHeartbeat() {
    this.isActivelyHeartbeating = true;
    this.heartbeatHandle = setInterval(() => {
      this.sendHeartbeat();
    }, this.options.heartbeatIntervalMs);
  }
  /**
   * Send a heartbeat frame. It goes through send() and therefore consumes a
   * seq number and carries our current ack, doubling as an ack update.
   */
  sendHeartbeat() {
    this.log?.debug("sending heartbeat", this.loggingMetadata);
    const heartbeat = {
      streamId: "heartbeat",
      controlFlags: 1 /* AckBit */,
      payload: {
        type: "ACK"
      }
    };
    this.send(heartbeat);
  }
  /**
   * Inbound data path: decode, enforce strict seq ordering (duplicates are
   * discarded, gaps kill the connection), update bookkeeping, then either
   * deliver the message or — for pure acks — echo a heartbeat back when we
   * are not the actively heartbeating side.
   */
  onMessageData = (msg) => {
    const parsedMsgRes = this.codec.fromBuffer(msg);
    if (!parsedMsgRes.ok) {
      this.listeners.onInvalidMessage(
        `could not parse message: ${parsedMsgRes.reason}`
      );
      return;
    }
    const parsedMsg = parsedMsgRes.value;
    if (parsedMsg.seq !== this.ack) {
      if (parsedMsg.seq < this.ack) {
        // already-processed message (e.g. retransmit after reconnect)
        this.log?.debug(
          `received duplicate msg (got seq: ${parsedMsg.seq}, wanted seq: ${this.ack}), discarding`,
          {
            ...this.loggingMetadata,
            transportMessage: parsedMsg
          }
        );
      } else {
        // a gap means lost messages — unrecoverable, drop the connection
        const reason = `received out-of-order msg, closing connection (got seq: ${parsedMsg.seq}, wanted seq: ${this.ack})`;
        this.log?.error(reason, {
          ...this.loggingMetadata,
          transportMessage: parsedMsg,
          tags: ["invariant-violation"]
        });
        this.telemetry.span.setStatus({
          code: SpanStatusCode.ERROR,
          message: reason
        });
        this.conn.close();
      }
      return;
    }
    this.log?.debug(`received msg`, {
      ...this.loggingMetadata,
      transportMessage: parsedMsg
    });
    this.updateBookkeeping(parsedMsg.ack, parsedMsg.seq);
    if (!isAck(parsedMsg.controlFlags)) {
      this.listeners.onMessage(parsedMsg);
      return;
    }
    this.log?.debug(`discarding msg (ack bit set)`, {
      ...this.loggingMetadata,
      transportMessage: parsedMsg
    });
    // passive side answers the active heartbeater so both sides see traffic
    if (!this.isActivelyHeartbeating) {
      this.sendHeartbeat();
    }
  };
  // Detach all connection listeners and stop timers when leaving this state.
  _handleStateExit() {
    super._handleStateExit();
    this.conn.removeDataListener();
    this.conn.removeCloseListener();
    this.conn.removeErrorListener();
    if (this.heartbeatHandle) {
      clearInterval(this.heartbeatHandle);
      this.heartbeatHandle = void 0;
    }
    if (this.heartbeatMissTimeout) {
      clearTimeout(this.heartbeatMissTimeout);
      this.heartbeatMissTimeout = void 0;
    }
  }
  _handleClose() {
    super._handleClose();
    this.conn.close();
  }
};

// transport/sessionStateMachine/SessionBackingOff.ts
// Session state while waiting out a reconnect backoff; notifies the
// listener when the backoff period elapses.
var SessionBackingOff = class extends IdentifiedSessionWithGracePeriod {
  state = "BackingOff" /* BackingOff */;
  listeners;
  backoffTimeout;
  constructor(props) {
    super(props);
    this.listeners = props.listeners;
    this.backoffTimeout = setTimeout(() => {
      this.listeners.onBackoffFinished();
    }, props.backoffMs);
  }
  _handleClose() {
    super._handleClose();
  }
  _handleStateExit() {
    super._handleStateExit();
    if (this.backoffTimeout) {
      clearTimeout(this.backoffTimeout);
      this.backoffTimeout = void 0;
    }
  }
};

// codec/adapter.ts
import { Value as Value2 } from "@sinclair/typebox/value";
// Wraps a raw codec so encode/decode never throw: every operation returns a
// { ok, value | reason } result, and decoded messages are schema-checked
// before being handed to the transport.
var CodecMessageAdapter = class {
  constructor(codec) {
    this.codec = codec;
  }
  toBuffer(msg) {
    try {
      return {
        ok: true,
        value: this.codec.toBuffer(msg)
      };
    } catch (e) {
      return {
        ok: false,
        reason: coerceErrorString(e)
      };
    }
  }
  fromBuffer(buf) {
    try {
      const parsedMsg = this.codec.fromBuffer(buf);
      // reject payloads that decode but are not valid transport messages
      if (!Value2.Check(OpaqueTransportMessageSchema, parsedMsg)) {
        return {
          ok: false,
          reason: "transport message schema mismatch"
        };
      }
      return {
        ok: true,
        value: parsedMsg
      };
    } catch (e) {
      return {
        ok: false,
        reason: coerceErrorString(e)
      };
    }
  }
};
this.sendHeartbeat(); + } + }; + _handleStateExit() { + super._handleStateExit(); + this.conn.removeDataListener(); + this.conn.removeCloseListener(); + this.conn.removeErrorListener(); + if (this.heartbeatHandle) { + clearInterval(this.heartbeatHandle); + this.heartbeatHandle = void 0; + } + if (this.heartbeatMissTimeout) { + clearTimeout(this.heartbeatMissTimeout); + this.heartbeatMissTimeout = void 0; + } + } + _handleClose() { + super._handleClose(); + this.conn.close(); + } +}; + +// transport/sessionStateMachine/SessionBackingOff.ts +var SessionBackingOff = class extends IdentifiedSessionWithGracePeriod { + state = "BackingOff" /* BackingOff */; + listeners; + backoffTimeout; + constructor(props) { + super(props); + this.listeners = props.listeners; + this.backoffTimeout = setTimeout(() => { + this.listeners.onBackoffFinished(); + }, props.backoffMs); + } + _handleClose() { + super._handleClose(); + } + _handleStateExit() { + super._handleStateExit(); + if (this.backoffTimeout) { + clearTimeout(this.backoffTimeout); + this.backoffTimeout = void 0; + } + } +}; + +// codec/adapter.ts +import { Value as Value2 } from "@sinclair/typebox/value"; +var CodecMessageAdapter = class { + constructor(codec) { + this.codec = codec; + } + toBuffer(msg) { + try { + return { + ok: true, + value: this.codec.toBuffer(msg) + }; + } catch (e) { + return { + ok: false, + reason: coerceErrorString(e) + }; + } + } + fromBuffer(buf) { + try { + const parsedMsg = this.codec.fromBuffer(buf); + if (!Value2.Check(OpaqueTransportMessageSchema, parsedMsg)) { + return { + ok: false, + reason: "transport message schema mismatch" + }; + } + return { + ok: true, + value: parsedMsg + }; + } catch (e) { + return { + ok: false, + reason: coerceErrorString(e) + }; + } + } +}; + +// transport/sessionStateMachine/transitions.ts +function inheritSharedSession(session) { + return { + id: session.id, + from: session.from, + to: session.to, + seq: session.seq, + ack: session.ack, + seqSent: 
/**
 * Extract the state that survives a session state transition: identity,
 * seq/ack bookkeeping, the unacked send buffer, telemetry, and config.
 * State-specific fields (connections, timers, listeners) are deliberately
 * NOT carried over.
 */
function inheritSharedSession(session) {
  const {
    id,
    from,
    to,
    seq,
    ack,
    seqSent,
    sendBuffer,
    telemetry,
    options,
    log,
    tracer,
    protocolVersion,
    codec
  } = session;
  return {
    id,
    from,
    to,
    seq,
    ack,
    seqSent,
    sendBuffer,
    telemetry,
    options,
    log,
    tracer,
    protocolVersion,
    codec
  };
}
/**
 * Same as inheritSharedSession, but also carries the disconnect grace
 * deadline for states that track it.
 */
function inheritSharedSessionWithGrace(session) {
  const carried = inheritSharedSession(session);
  carried.graceExpiryTime = session.graceExpiryTime;
  return carried;
}
+ transition: { + // happy path transitions + NoConnectionToBackingOff: (oldSession, backoffMs, listeners) => { + const carriedState = inheritSharedSessionWithGrace(oldSession); + oldSession._handleStateExit(); + const session = new SessionBackingOff({ + backoffMs, + listeners, + ...carriedState + }); + session.log?.info( + `session ${session.id} transition from NoConnection to BackingOff`, + { + ...session.loggingMetadata, + tags: ["state-transition"] + } + ); + return session; + }, + BackingOffToConnecting: (oldSession, connPromise, listeners) => { + const carriedState = inheritSharedSessionWithGrace(oldSession); + oldSession._handleStateExit(); + const session = new SessionConnecting({ + connPromise, + listeners, + ...carriedState + }); + session.log?.info( + `session ${session.id} transition from BackingOff to Connecting`, + { + ...session.loggingMetadata, + tags: ["state-transition"] + } + ); + return session; + }, + ConnectingToHandshaking: (oldSession, conn, listeners) => { + const carriedState = inheritSharedSessionWithGrace(oldSession); + oldSession._handleStateExit(); + const session = new SessionHandshaking({ + conn, + listeners, + ...carriedState + }); + conn.telemetry = createConnectionTelemetryInfo( + session.tracer, + conn, + session.telemetry + ); + session.log?.info( + `session ${session.id} transition from Connecting to Handshaking`, + { + ...session.loggingMetadata, + tags: ["state-transition"] + } + ); + return session; + }, + HandshakingToConnected: (oldSession, listeners) => { + const carriedState = inheritSharedSession(oldSession); + const conn = oldSession.conn; + oldSession._handleStateExit(); + const session = new SessionConnected({ + conn, + listeners, + ...carriedState + }); + session.startMissingHeartbeatTimeout(); + session.log?.info( + `session ${session.id} transition from Handshaking to Connected`, + { + ...session.loggingMetadata, + tags: ["state-transition"] + } + ); + return session; + }, + WaitingForHandshakeToConnected: 
(pendingSession, oldSession, sessionId, to, propagationCtx, listeners, protocolVersion) => { + const conn = pendingSession.conn; + const { from, options } = pendingSession; + const carriedState = oldSession ? ( + // old session exists, inherit state + inheritSharedSession(oldSession) + ) : ( + // old session does not exist, create new state + { + id: sessionId, + from, + to, + seq: 0, + ack: 0, + seqSent: 0, + sendBuffer: [], + telemetry: createSessionTelemetryInfo( + pendingSession.tracer, + sessionId, + to, + from, + propagationCtx + ), + options, + tracer: pendingSession.tracer, + log: pendingSession.log, + protocolVersion, + codec: new CodecMessageAdapter(options.codec) + } + ); + pendingSession._handleStateExit(); + oldSession?._handleStateExit(); + const session = new SessionConnected({ + conn, + listeners, + ...carriedState + }); + session.startMissingHeartbeatTimeout(); + conn.telemetry = createConnectionTelemetryInfo( + session.tracer, + conn, + session.telemetry + ); + session.log?.info( + `session ${session.id} transition from WaitingForHandshake to Connected`, + { + ...session.loggingMetadata, + tags: ["state-transition"] + } + ); + return session; + }, + // disconnect paths + BackingOffToNoConnection: (oldSession, listeners) => { + const carriedState = inheritSharedSessionWithGrace(oldSession); + oldSession._handleStateExit(); + const session = new SessionNoConnection({ + listeners, + ...carriedState + }); + session.log?.info( + `session ${session.id} transition from BackingOff to NoConnection`, + { + ...session.loggingMetadata, + tags: ["state-transition"] + } + ); + return session; + }, + ConnectingToNoConnection: (oldSession, listeners) => { + const carriedState = inheritSharedSessionWithGrace(oldSession); + oldSession.bestEffortClose(); + oldSession._handleStateExit(); + const session = new SessionNoConnection({ + listeners, + ...carriedState + }); + session.log?.info( + `session ${session.id} transition from Connecting to NoConnection`, + { + 
...session.loggingMetadata, + tags: ["state-transition"] + } + ); + return session; + }, + HandshakingToNoConnection: (oldSession, listeners) => { + const carriedState = inheritSharedSessionWithGrace(oldSession); + oldSession.conn.close(); + oldSession._handleStateExit(); + const session = new SessionNoConnection({ + listeners, + ...carriedState + }); + session.log?.info( + `session ${session.id} transition from Handshaking to NoConnection`, + { + ...session.loggingMetadata, + tags: ["state-transition"] + } + ); + return session; + }, + ConnectedToNoConnection: (oldSession, listeners) => { + const carriedState = inheritSharedSession(oldSession); + const graceExpiryTime = Date.now() + oldSession.options.sessionDisconnectGraceMs; + oldSession.conn.close(); + oldSession._handleStateExit(); + const session = new SessionNoConnection({ + listeners, + graceExpiryTime, + ...carriedState + }); + session.log?.info( + `session ${session.id} transition from Connected to NoConnection`, + { + ...session.loggingMetadata, + tags: ["state-transition"] + } + ); + return session; + } + } +}; +var transitions = SessionStateGraph.transition; +var ClientSessionStateGraph = { + entrypoint: SessionStateGraph.entrypoints.NoConnection, + transition: { + // happy paths + // NoConnection -> BackingOff: attempt to connect + NoConnectionToBackingOff: transitions.NoConnectionToBackingOff, + // BackingOff -> Connecting: backoff period elapsed, start connection + BackingOffToConnecting: transitions.BackingOffToConnecting, + // Connecting -> Handshaking: connection established, start handshake + ConnectingToHandshaking: transitions.ConnectingToHandshaking, + // Handshaking -> Connected: handshake complete, session ready + HandshakingToConnected: transitions.HandshakingToConnected, + // disconnect paths + // BackingOff -> NoConnection: unused + BackingOffToNoConnection: transitions.BackingOffToNoConnection, + // Connecting -> NoConnection: connection failed or connection timeout + 
ConnectingToNoConnection: transitions.ConnectingToNoConnection, + // Handshaking -> NoConnection: connection closed or handshake timeout + HandshakingToNoConnection: transitions.HandshakingToNoConnection, + // Connected -> NoConnection: connection closed + ConnectedToNoConnection: transitions.ConnectedToNoConnection + // destroy/close paths + // NoConnection -> x: grace period elapsed + // BackingOff -> x: grace period elapsed + // Connecting -> x: grace period elapsed + // Handshaking -> x: grace period elapsed or invalid handshake message or handshake rejection + // Connected -> x: grace period elapsed or invalid message + } +}; +var ServerSessionStateGraph = { + entrypoint: SessionStateGraph.entrypoints.WaitingForHandshake, + transition: { + // happy paths + // WaitingForHandshake -> Connected: handshake complete, session ready + WaitingForHandshakeToConnected: transitions.WaitingForHandshakeToConnected, + // disconnect paths + // Connected -> NoConnection: connection closed + ConnectedToNoConnection: transitions.ConnectedToNoConnection + // destroy/close paths + // WaitingForHandshake -> x: handshake timeout elapsed or invalid handshake message or handshake rejection or connection closed + } +}; + +// transport/transport.ts +var Transport = class { + /** + * The status of the transport. + */ + status; + /** + * The client ID of this transport. + */ + clientId; + /** + * The event dispatcher for handling events of type EventTypes. + */ + eventDispatcher; + /** + * The options for this transport. + */ + options; + log; + tracer; + sessions; + /** + * Creates a new Transport instance. + * @param codec The codec used to encode and decode messages. + * @param clientId The client ID of this transport. 
+ */ + constructor(clientId, providedOptions) { + this.options = { ...defaultTransportOptions, ...providedOptions }; + this.eventDispatcher = new EventDispatcher(); + this.clientId = clientId; + this.status = "open"; + this.sessions = /* @__PURE__ */ new Map(); + this.tracer = getTracer(); + } + bindLogger(fn, level) { + if (typeof fn === "function") { + this.log = createLogProxy(new BaseLogger(fn, level)); + return; + } + this.log = createLogProxy(fn); + } + /** + * Called when a message is received by this transport. + * You generally shouldn't need to override this in downstream transport implementations. + * @param message The received message. + */ + handleMsg(message) { + if (this.getStatus() !== "open") return; + this.eventDispatcher.dispatchEvent("message", message); + } + /** + * Adds a listener to this transport. + * @param the type of event to listen for + * @param handler The message handler to add. + */ + addEventListener(type, handler) { + this.eventDispatcher.addEventListener(type, handler); + } + /** + * Removes a listener from this transport. + * @param the type of event to un-listen on + * @param handler The message handler to remove. + */ + removeEventListener(type, handler) { + this.eventDispatcher.removeEventListener(type, handler); + } + protocolError(message) { + this.eventDispatcher.dispatchEvent("protocolError", message); + } + /** + * Default close implementation for transports. You should override this in the downstream + * implementation if you need to do any additional cleanup and call super.close() at the end. + * Closes the transport. Any messages sent while the transport is closed will be silently discarded. 
+ */ + close() { + this.status = "closed"; + const sessions = Array.from(this.sessions.values()); + for (const session of sessions) { + this.deleteSession(session); + } + this.eventDispatcher.dispatchEvent("transportStatus", { + status: this.status + }); + this.eventDispatcher.removeAllListeners(); + this.log?.info(`manually closed transport`, { clientId: this.clientId }); + } + getStatus() { + return this.status; + } + // state transitions + createSession(session) { + const activeSession = this.sessions.get(session.to); + if (activeSession) { + const msg = `attempt to create session for ${session.to} but active session (${activeSession.id}) already exists`; + this.log?.error(msg, { + ...session.loggingMetadata, + tags: ["invariant-violation"] + }); + throw new Error(msg); + } + this.sessions.set(session.to, session); + this.eventDispatcher.dispatchEvent("sessionStatus", { + status: "created", + session + }); + this.eventDispatcher.dispatchEvent("sessionTransition", { + state: session.state, + id: session.id + }); + } + updateSession(session) { + const activeSession = this.sessions.get(session.to); + if (!activeSession) { + const msg = `attempt to transition session for ${session.to} but no active session exists`; + this.log?.error(msg, { + ...session.loggingMetadata, + tags: ["invariant-violation"] + }); + throw new Error(msg); + } + if (activeSession.id !== session.id) { + const msg = `attempt to transition active session for ${session.to} but active session (${activeSession.id}) is different from handle (${session.id})`; + this.log?.error(msg, { + ...session.loggingMetadata, + tags: ["invariant-violation"] + }); + throw new Error(msg); + } + this.sessions.set(session.to, session); + this.eventDispatcher.dispatchEvent("sessionTransition", { + state: session.state, + id: session.id + }); + } + deleteSession(session, options) { + if (session._isConsumed) return; + const loggingMetadata = session.loggingMetadata; + if (loggingMetadata.tags && options?.unhealthy) { + 
loggingMetadata.tags.push("unhealthy-session"); + } + session.log?.info(`closing session ${session.id}`, loggingMetadata); + this.eventDispatcher.dispatchEvent("sessionStatus", { + status: "closing", + session + }); + const to = session.to; + session.close(); + this.sessions.delete(to); + this.eventDispatcher.dispatchEvent("sessionStatus", { + status: "closed", + session: { id: session.id, to } + }); + } + // common listeners + onSessionGracePeriodElapsed(session) { + this.log?.info( + `session to ${session.to} grace period elapsed, closing`, + session.loggingMetadata + ); + this.deleteSession(session); + } + onConnectingFailed(session) { + const noConnectionSession = SessionStateGraph.transition.ConnectingToNoConnection(session, { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + } + }); + this.updateSession(noConnectionSession); + return noConnectionSession; + } + onConnClosed(session) { + let noConnectionSession; + if (session.state === "Handshaking" /* Handshaking */) { + noConnectionSession = SessionStateGraph.transition.HandshakingToNoConnection(session, { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + } + }); + } else { + noConnectionSession = SessionStateGraph.transition.ConnectedToNoConnection(session, { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + } + }); + } + this.updateSession(noConnectionSession); + return noConnectionSession; + } + /** + * Gets a send closure scoped to a specific session. Sending using the returned + * closure after the session has transitioned to a different state will be a noop. + * + * Session objects themselves can become stale as they transition between + * states. As stale sessions cannot be used again (and will throw), holding + * onto a session object is not recommended. 
+ */ + getSessionBoundSendFn(to, sessionId) { + if (this.getStatus() !== "open") { + throw new Error("cannot get a bound send function on a closed transport"); + } + return (msg) => { + const session = this.sessions.get(to); + if (!session) { + throw new Error( + `session scope for ${sessionId} has ended (close), can't send` + ); + } + const sameSession = session.id === sessionId; + if (!sameSession || session._isConsumed) { + throw new Error( + `session scope for ${sessionId} has ended (transition), can't send` + ); + } + const res = session.send(msg); + if (!res.ok) { + throw new Error(res.reason); + } + return res.value; + }; + } +}; + +// transport/server.ts +import { Value as Value3 } from "@sinclair/typebox/value"; +var ServerTransport = class extends Transport { + /** + * The options for this transport. + */ + options; + /** + * Optional handshake options for the server. + */ + handshakeExtensions; + /** + * A map of session handshake data for each session. + */ + sessionHandshakeMetadata = /* @__PURE__ */ new Map(); + sessions = /* @__PURE__ */ new Map(); + pendingSessions = /* @__PURE__ */ new Set(); + constructor(clientId, providedOptions) { + super(clientId, providedOptions); + this.sessions = /* @__PURE__ */ new Map(); + this.options = { + ...defaultServerTransportOptions, + ...providedOptions + }; + this.log?.info(`initiated server transport`, { + clientId: this.clientId, + protocolVersion: currentProtocolVersion + }); + } + extendHandshake(options) { + this.handshakeExtensions = options; + } + deletePendingSession(pendingSession) { + pendingSession.close(); + this.pendingSessions.delete(pendingSession); + } + deleteSession(session, options) { + this.sessionHandshakeMetadata.delete(session.to); + super.deleteSession(session, options); + } + handleConnection(conn) { + if (this.getStatus() !== "open") return; + this.log?.info(`new incoming connection`, { + ...conn.loggingMetadata, + clientId: this.clientId + }); + let receivedHandshake = false; + const 
pendingSession = ServerSessionStateGraph.entrypoint( + this.clientId, + conn, + { + onConnectionClosed: () => { + this.log?.warn( + `connection from unknown closed before handshake finished`, + pendingSession.loggingMetadata + ); + this.deletePendingSession(pendingSession); + }, + onConnectionErrored: (err) => { + const errorString = coerceErrorString(err); + this.log?.warn( + `connection from unknown errored before handshake finished: ${errorString}`, + pendingSession.loggingMetadata + ); + this.deletePendingSession(pendingSession); + }, + onHandshakeTimeout: () => { + this.log?.warn( + `connection from unknown timed out before handshake finished`, + pendingSession.loggingMetadata + ); + this.deletePendingSession(pendingSession); + }, + onHandshake: (msg) => { + if (receivedHandshake) { + this.log?.error( + `received multiple handshake messages from pending session`, + { + ...pendingSession.loggingMetadata, + connectedTo: msg.from, + transportMessage: msg + } + ); + this.deletePendingSession(pendingSession); + return; + } + receivedHandshake = true; + void this.onHandshakeRequest(pendingSession, msg); + }, + onInvalidHandshake: (reason, code) => { + this.log?.error( + `invalid handshake: ${reason}`, + pendingSession.loggingMetadata + ); + this.deletePendingSession(pendingSession); + this.protocolError({ + type: ProtocolError.HandshakeFailed, + code, + message: reason + }); + } + }, + this.options, + this.tracer, + this.log + ); + this.pendingSessions.add(pendingSession); + } + rejectHandshakeRequest(session, to, reason, code, metadata) { + session.conn.telemetry?.span.setStatus({ + code: SpanStatusCode.ERROR, + message: reason + }); + this.log?.warn(reason, metadata); + const responseMsg = handshakeResponseMessage({ + from: this.clientId, + to, + status: { + ok: false, + code, + reason + } + }); + const res = session.sendHandshake(responseMsg); + if (!res.ok) { + this.log?.error(`failed to send handshake response: ${res.reason}`, { + ...session.loggingMetadata, + 
transportMessage: responseMsg + }); + this.protocolError({ + type: ProtocolError.MessageSendFailure, + message: res.reason + }); + this.deletePendingSession(session); + return; + } + this.protocolError({ + type: ProtocolError.HandshakeFailed, + code, + message: reason + }); + this.deletePendingSession(session); + } + async onHandshakeRequest(session, msg) { + if (!Value3.Check(ControlMessageHandshakeRequestSchema, msg.payload)) { + this.rejectHandshakeRequest( + session, + msg.from, + "received invalid handshake request", + "MALFORMED_HANDSHAKE", + { + ...session.loggingMetadata, + transportMessage: msg, + connectedTo: msg.from, + validationErrors: [ + ...Value3.Errors(ControlMessageHandshakeRequestSchema, msg.payload) + ] + } + ); + return; + } + const gotVersion = msg.payload.protocolVersion; + if (!isAcceptedProtocolVersion(gotVersion)) { + this.rejectHandshakeRequest( + session, + msg.from, + `expected protocol version oneof [${acceptedProtocolVersions.toString()}], got ${gotVersion}`, + "PROTOCOL_VERSION_MISMATCH", + { + ...session.loggingMetadata, + connectedTo: msg.from, + transportMessage: msg + } + ); + return; + } + let parsedMetadata = {}; + if (this.handshakeExtensions) { + if (!Value3.Check(this.handshakeExtensions.schema, msg.payload.metadata)) { + this.rejectHandshakeRequest( + session, + msg.from, + "received malformed handshake metadata", + "MALFORMED_HANDSHAKE_META", + { + ...session.loggingMetadata, + connectedTo: msg.from, + validationErrors: [ + ...Value3.Errors( + this.handshakeExtensions.schema, + msg.payload.metadata + ) + ] + } + ); + return; + } + const previousParsedMetadata = this.sessionHandshakeMetadata.get( + msg.from + ); + const parsedMetadataOrFailureCode = await this.handshakeExtensions.validate( + msg.payload.metadata, + previousParsedMetadata + ); + if (session._isConsumed) { + return; + } + if (Value3.Check( + HandshakeErrorCustomHandlerFatalResponseCodes, + parsedMetadataOrFailureCode + )) { + this.rejectHandshakeRequest( + 
session, + msg.from, + "rejected by handshake handler", + parsedMetadataOrFailureCode, + { + ...session.loggingMetadata, + connectedTo: msg.from, + clientId: this.clientId + } + ); + return; + } + parsedMetadata = parsedMetadataOrFailureCode; + } + let connectCase = "new session"; + const clientNextExpectedSeq = msg.payload.expectedSessionState.nextExpectedSeq; + const clientNextSentSeq = msg.payload.expectedSessionState.nextSentSeq; + let oldSession = this.sessions.get(msg.from); + if (this.options.enableTransparentSessionReconnects && oldSession && oldSession.id === msg.payload.sessionId) { + connectCase = "transparent reconnection"; + const ourNextSeq = oldSession.nextSeq(); + const ourAck = oldSession.ack; + if (clientNextSentSeq > ourAck) { + this.rejectHandshakeRequest( + session, + msg.from, + `client is in the future: server wanted next message to be ${ourAck} but client would have sent ${clientNextSentSeq}`, + "SESSION_STATE_MISMATCH", + { + ...session.loggingMetadata, + connectedTo: msg.from, + transportMessage: msg + } + ); + return; + } + if (ourNextSeq > clientNextExpectedSeq) { + this.rejectHandshakeRequest( + session, + msg.from, + `server is in the future: client wanted next message to be ${clientNextExpectedSeq} but server would have sent ${ourNextSeq}`, + "SESSION_STATE_MISMATCH", + { + ...session.loggingMetadata, + connectedTo: msg.from, + transportMessage: msg + } + ); + return; + } + if (oldSession.state !== "NoConnection" /* NoConnection */) { + const noConnectionSession = ServerSessionStateGraph.transition.ConnectedToNoConnection( + oldSession, + { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + } + } + ); + oldSession = noConnectionSession; + this.updateSession(oldSession); + } + } else if (oldSession) { + connectCase = "hard reconnection"; + this.log?.info( + `client is reconnecting to a new session (${msg.payload.sessionId}) with an old session (${oldSession.id}) already existing, closing 
old session`, + { + ...session.loggingMetadata, + connectedTo: msg.from, + sessionId: msg.payload.sessionId + } + ); + this.deleteSession(oldSession); + oldSession = void 0; + } + if (!oldSession && (clientNextSentSeq > 0 || clientNextExpectedSeq > 0)) { + connectCase = "unknown session"; + const rejectionMessage = this.options.enableTransparentSessionReconnects ? `client is trying to reconnect to a session the server don't know about: ${msg.payload.sessionId}` : `client is attempting a transparent reconnect to a session but the server does not support it: ${msg.payload.sessionId}`; + this.rejectHandshakeRequest( + session, + msg.from, + rejectionMessage, + "SESSION_STATE_MISMATCH", + { + ...session.loggingMetadata, + connectedTo: msg.from, + transportMessage: msg + } + ); + return; + } + const sessionId = msg.payload.sessionId; + this.log?.info( + `handshake from ${msg.from} ok (${connectCase}), responding with handshake success`, + { + ...session.loggingMetadata, + connectedTo: msg.from + } + ); + const responseMsg = handshakeResponseMessage({ + from: this.clientId, + to: msg.from, + status: { + ok: true, + sessionId + } + }); + const res = session.sendHandshake(responseMsg); + if (!res.ok) { + this.log?.error(`failed to send handshake response: ${res.reason}`, { + ...session.loggingMetadata, + transportMessage: responseMsg + }); + this.protocolError({ + type: ProtocolError.MessageSendFailure, + message: res.reason + }); + this.deletePendingSession(session); + return; + } + this.pendingSessions.delete(session); + const connectedSession = ServerSessionStateGraph.transition.WaitingForHandshakeToConnected( + session, + // by this point oldSession is either no connection or we dont have an old session + oldSession, + sessionId, + msg.from, + msg.tracing, + { + onConnectionErrored: (err) => { + const errStr = coerceErrorString(err); + this.log?.warn( + `connection to ${connectedSession.to} errored: ${errStr}`, + connectedSession.loggingMetadata + ); + }, + 
onConnectionClosed: () => { + this.log?.info( + `connection to ${connectedSession.to} closed`, + connectedSession.loggingMetadata + ); + this.onConnClosed(connectedSession); + }, + onMessage: (msg2) => { + this.handleMsg(msg2); + }, + onInvalidMessage: (reason) => { + this.log?.error(`invalid message: ${reason}`, { + ...connectedSession.loggingMetadata, + transportMessage: msg + }); + this.protocolError({ + type: ProtocolError.InvalidMessage, + message: reason + }); + this.deleteSession(connectedSession, { unhealthy: true }); + }, + onMessageSendFailure: (msg2, reason) => { + this.log?.error(`failed to send message: ${reason}`, { + ...connectedSession.loggingMetadata, + transportMessage: msg2 + }); + this.protocolError({ + type: ProtocolError.MessageSendFailure, + message: reason + }); + this.deleteSession(connectedSession, { unhealthy: true }); + } + }, + gotVersion + ); + const bufferSendRes = connectedSession.sendBufferedMessages(); + if (!bufferSendRes.ok) { + return; + } + this.sessionHandshakeMetadata.set(connectedSession.to, parsedMetadata); + if (oldSession) { + this.updateSession(connectedSession); + } else { + this.createSession(connectedSession); + } + connectedSession.startActiveHeartbeat(); + } +}; + +// transport/impls/ws/server.ts +function cleanHeaders(headers) { + const cleanedHeaders = {}; + for (const [key, value] of Object.entries(headers)) { + if (!key.startsWith("sec-") && value) { + const cleanedValue = Array.isArray(value) ? 
value[0] : value; + cleanedHeaders[key] = cleanedValue; + } + } + return cleanedHeaders; +} +var WebSocketServerTransport = class extends ServerTransport { + wss; + constructor(wss, clientId, providedOptions) { + super(clientId, providedOptions); + this.wss = wss; + this.wss.on("connection", this.connectionHandler); + } + connectionHandler = (ws, req) => { + const conn = new WebSocketConnection(ws, { + headers: cleanHeaders(req.headersDistinct) + }); + this.handleConnection(conn); + }; + close() { + super.close(); + this.wss.off("connection", this.connectionHandler); + } +}; + +// python-client/tests/test_server_handshake.ts +import { Type as Type6 } from "@sinclair/typebox"; +var ServiceSchema = createServiceSchema(); +var HandshakeTestServiceSchema = ServiceSchema.define({ + echo: Procedure.rpc({ + requestInit: Type6.Object({ msg: Type6.String() }), + responseData: Type6.Object({ response: Type6.String() }), + responseError: Type6.Never(), + async handler({ reqInit }) { + return Ok({ response: reqInit.msg }); + } + }) +}); +var services = { + test: HandshakeTestServiceSchema +}; +var handshakeSchema = Type6.Object({ token: Type6.String() }); +async function main() { + const httpServer = http.createServer(); + const port = await new Promise((resolve, reject) => { + httpServer.listen(0, "127.0.0.1", () => { + const addr = httpServer.address(); + if (typeof addr === "object" && addr) resolve(addr.port); + else reject(new Error("couldn't get port")); + }); + }); + const wss = new WebSocketServer({ server: httpServer }); + const serverTransport = new WebSocketServerTransport(wss, "HANDSHAKE_SERVER"); + const _server = createServer(serverTransport, services, { + handshakeOptions: createServerHandshakeOptions( + handshakeSchema, + (metadata) => { + if (metadata.token !== "valid-token") { + return "REJECTED_BY_CUSTOM_HANDLER"; + } + return {}; + } + ) + }); + process.stdout.write(`RIVER_PORT=${port} +`); + process.on("SIGTERM", () => { + void _server.close().then(() => { 
+ httpServer.close(); + process.exit(0); + }); + }); + process.on("SIGINT", () => { + void _server.close().then(() => { + httpServer.close(); + process.exit(0); + }); + }); +} +main().catch((err) => { + console.error("Failed to start handshake test server:", err); + process.exit(1); +}); diff --git a/python-client/tests/test_server_handshake.ts b/python-client/tests/test_server_handshake.ts new file mode 100644 index 00000000..6ce07c06 --- /dev/null +++ b/python-client/tests/test_server_handshake.ts @@ -0,0 +1,83 @@ +/** + * Test server with handshake validation for Python client tests. + * + * Requires clients to send handshake metadata with {token: string}. + * Valid token is "valid-token". + */ +import http from 'node:http'; +import { WebSocketServer } from 'ws'; +import { WebSocketServerTransport } from '../../transport/impls/ws/server'; +import { + createServer, + createServiceSchema, + Procedure, + Ok, +} from '../../router'; +import { + createServerHandshakeOptions, +} from '../../router/handshake'; +import { Type } from '@sinclair/typebox'; + +const ServiceSchema = createServiceSchema(); + +const HandshakeTestServiceSchema = ServiceSchema.define({ + echo: Procedure.rpc({ + requestInit: Type.Object({ msg: Type.String() }), + responseData: Type.Object({ response: Type.String() }), + responseError: Type.Never(), + async handler({ reqInit }) { + return Ok({ response: reqInit.msg }); + }, + }), +}); + +const services = { + test: HandshakeTestServiceSchema, +}; + +const handshakeSchema = Type.Object({ token: Type.String() }); + +async function main() { + const httpServer = http.createServer(); + const port = await new Promise((resolve, reject) => { + httpServer.listen(0, '127.0.0.1', () => { + const addr = httpServer.address(); + if (typeof addr === 'object' && addr) resolve(addr.port); + else reject(new Error("couldn't get port")); + }); + }); + + const wss = new WebSocketServer({ server: httpServer }); + const serverTransport = new WebSocketServerTransport(wss, 
'HANDSHAKE_SERVER'); + const _server = createServer(serverTransport, services, { + handshakeOptions: createServerHandshakeOptions( + handshakeSchema, + (metadata) => { + if (metadata.token !== 'valid-token') { + return 'REJECTED_BY_CUSTOM_HANDLER' as const; + } + return {}; + }, + ), + }); + + process.stdout.write(`RIVER_PORT=${port}\n`); + + process.on('SIGTERM', () => { + void _server.close().then(() => { + httpServer.close(); + process.exit(0); + }); + }); + process.on('SIGINT', () => { + void _server.close().then(() => { + httpServer.close(); + process.exit(0); + }); + }); +} + +main().catch((err: unknown) => { + console.error('Failed to start handshake test server:', err); + process.exit(1); +}); diff --git a/python-client/tests/test_session.py b/python-client/tests/test_session.py new file mode 100644 index 00000000..f006ffda --- /dev/null +++ b/python-client/tests/test_session.py @@ -0,0 +1,227 @@ +"""Deterministic session lifecycle tests. + +Uses short timeouts to test heartbeat miss, grace period expiry, +and retry budget behavior without slow waits. 
+""" + +from __future__ import annotations + +import asyncio + +import pytest + +from river.client import RiverClient +from river.codec import NaiveJsonCodec +from river.session import SessionOptions, SessionState +from river.transport import WebSocketClientTransport + +SHORT_OPTIONS = SessionOptions( + heartbeat_interval_ms=100, + heartbeats_until_dead=2, # 200ms miss timeout + session_disconnect_grace_ms=300, # 300ms grace + connection_timeout_ms=2000, + handshake_timeout_ms=1000, +) + + +async def make_client( + server_url: str, + options: SessionOptions | None = None, +) -> RiverClient: + transport = WebSocketClientTransport( + ws_url=server_url, + client_id=None, + server_id="SERVER", + codec=NaiveJsonCodec(), + options=options or SHORT_OPTIONS, + ) + return RiverClient( + transport, + server_id="SERVER", + connect_on_invoke=True, + eagerly_connect=False, + ) + + +async def cleanup(client: RiverClient) -> None: + await client.transport.close() + + +# ===================================================================== +# Heartbeat Miss Tests +# ===================================================================== + + +class TestHeartbeatMiss: + @pytest.mark.asyncio + async def test_ws_close_triggers_no_connection(self, server_url: str): + """Force-closing WS transitions session to NO_CONNECTION.""" + client = await make_client(server_url) + try: + # Make an RPC to establish connection + result = await client.rpc("test", "add", {"n": 1}) + assert result["ok"] is True + + session = client.transport.sessions.get("SERVER") + assert session is not None + assert session.state == SessionState.CONNECTED + + # Force-close the WS (not the transport) + ws = session._ws + assert ws is not None + # Disable reconnect so we can observe the state + client.transport.reconnect_on_connection_drop = False + await ws.close() + + # Wait for the connection drop to be processed + await asyncio.sleep(0.3) + + assert session.state == SessionState.NO_CONNECTION + finally: + await 
cleanup(client) + + @pytest.mark.asyncio + async def test_active_rpcs_keep_alive(self, server_url: str): + """Active RPCs reset heartbeat miss — no spurious disconnect.""" + client = await make_client(server_url) + try: + # Make several RPCs over a period longer than heartbeat_interval + for _ in range(5): + result = await client.rpc("test", "add", {"n": 1}) + assert result["ok"] is True + await asyncio.sleep(0.05) + + session = client.transport.sessions.get("SERVER") + assert session is not None + assert session.state == SessionState.CONNECTED + finally: + await cleanup(client) + + +# ===================================================================== +# Grace Period Tests +# ===================================================================== + + +class TestGracePeriod: + @pytest.mark.asyncio + async def test_grace_period_expiry_destroys_session(self, server_url: str): + """Session destroyed after grace period elapses without reconnect.""" + client = await make_client(server_url) + try: + result = await client.rpc("test", "add", {"n": 1}) + assert result["ok"] is True + + session = client.transport.sessions.get("SERVER") + assert session is not None + session_id = session.id + + # Force WS close and disable reconnect + client.transport.reconnect_on_connection_drop = False + ws = session._ws + assert ws is not None + await ws.close() + + # Wait for drop processing + await asyncio.sleep(0.1) + assert session.state == SessionState.NO_CONNECTION + + # Wait for grace period to elapse (300ms + buffer) + await asyncio.sleep(0.4) + + # Session should have been deleted + remaining = client.transport.sessions.get("SERVER") + assert remaining is None or remaining.id != session_id + finally: + await cleanup(client) + + @pytest.mark.asyncio + async def test_reconnect_within_grace_preserves_session(self, server_url: str): + """Reconnecting within grace period preserves the session.""" + # Use longer grace to ensure reconnect completes in time + opts = SessionOptions( + 
heartbeat_interval_ms=100, + heartbeats_until_dead=2, + session_disconnect_grace_ms=5000, + connection_timeout_ms=2000, + handshake_timeout_ms=1000, + ) + client = await make_client(server_url, options=opts) + try: + result = await client.rpc("test", "add", {"n": 1}) + assert result["ok"] is True + + session = client.transport.sessions.get("SERVER") + assert session is not None + + # Force WS close — auto-reconnect is on by default + ws = session._ws + assert ws is not None + await ws.close() + + # Wait for reconnect to complete (well within 300ms grace) + await asyncio.sleep(0.5) + + # Session should still exist with same ID + new_session = client.transport.sessions.get("SERVER") + # Either same session or a new one (server may have lost state) + assert new_session is not None + + # Verify connection works + result = await client.rpc("test", "add", {"n": 2}) + assert result["ok"] is True + finally: + await cleanup(client) + + +# ===================================================================== +# Retry Budget Tests +# ===================================================================== + + +class TestRetryBudget: + @pytest.mark.asyncio + async def test_backoff_increases_on_failures(self, server_url: str): + """Retry backoff increases after failed attempts.""" + transport = WebSocketClientTransport( + ws_url="ws://127.0.0.1:1", # intentionally invalid + client_id=None, + server_id="INVALID", + codec=NaiveJsonCodec(), + options=SessionOptions( + connection_timeout_ms=200, + handshake_timeout_ms=200, + session_disconnect_grace_ms=500, + ), + ) + try: + budget = transport._retry_budget + assert budget.has_budget() + initial_backoff = budget.get_backoff_ms() + + # Consume some budget to simulate failures + budget.consume_budget() + budget.consume_budget() + budget.consume_budget() + + higher_backoff = budget.get_backoff_ms() + assert higher_backoff > initial_backoff + finally: + await transport.close() + + @pytest.mark.asyncio + async def 
test_budget_restores_after_success(self, server_url: str): + """Budget restores gradually after successful connection.""" + client = await make_client(server_url) + try: + # Make an RPC to trigger a successful connection + result = await client.rpc("test", "add", {"n": 1}) + assert result["ok"] is True + + budget = client.transport._retry_budget + # After a successful connection the budget_consumed should be + # restoring (or already at 0) + await asyncio.sleep(0.3) # wait for budget restore + assert budget.budget_consumed <= 1 # mostly restored + finally: + await cleanup(client) From f3a7488399f030707477b2463a1a161f977e543d Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 17:15:50 -0800 Subject: [PATCH 08/29] fix --- python-client/tests/extract_test_schema.mjs | 5 ++++- python-client/tests/extract_test_schema.ts | 5 ++++- python-client/tests/test_server.ts | 12 +++++++++--- python-client/tests/test_server_handshake.mjs | 5 ++++- python-client/tests/test_server_handshake.ts | 17 +++++++---------- 5 files changed, 28 insertions(+), 16 deletions(-) diff --git a/python-client/tests/extract_test_schema.mjs b/python-client/tests/extract_test_schema.mjs index a4e8fe05..0cb6644b 100644 --- a/python-client/tests/extract_test_schema.mjs +++ b/python-client/tests/extract_test_schema.mjs @@ -473,7 +473,10 @@ var TestServiceSchema = ServiceSchema.define({ }), echoBinary: Procedure.rpc({ requestInit: Type5.Object({ data: Type5.Uint8Array() }), - responseData: Type5.Object({ data: Type5.Uint8Array(), length: Type5.Number() }), + responseData: Type5.Object({ + data: Type5.Uint8Array(), + length: Type5.Number() + }), responseError: Type5.Never(), async handler({ reqInit }) { return Ok({ data: reqInit.data, length: reqInit.data.length }); diff --git a/python-client/tests/extract_test_schema.ts b/python-client/tests/extract_test_schema.ts index 48aaa506..85ed26ba 100644 --- a/python-client/tests/extract_test_schema.ts +++ b/python-client/tests/extract_test_schema.ts 
@@ -54,7 +54,10 @@ const TestServiceSchema = ServiceSchema.define({ }), echoBinary: Procedure.rpc({ requestInit: Type.Object({ data: Type.Uint8Array() }), - responseData: Type.Object({ data: Type.Uint8Array(), length: Type.Number() }), + responseData: Type.Object({ + data: Type.Uint8Array(), + length: Type.Number(), + }), responseError: Type.Never(), async handler({ reqInit }) { return Ok({ data: reqInit.data, length: reqInit.data.length }); diff --git a/python-client/tests/test_server.ts b/python-client/tests/test_server.ts index 13f18efe..906261f6 100644 --- a/python-client/tests/test_server.ts +++ b/python-client/tests/test_server.ts @@ -77,7 +77,10 @@ const TestServiceSchema = ServiceSchema.define({ }), echoBinary: Procedure.rpc({ requestInit: Type.Object({ data: Type.Uint8Array() }), - responseData: Type.Object({ data: Type.Uint8Array(), length: Type.Number() }), + responseData: Type.Object({ + data: Type.Uint8Array(), + length: Type.Number(), + }), responseError: Type.Never(), async handler({ reqInit }) { return Ok({ data: reqInit.data, length: reqInit.data.length }); @@ -409,7 +412,8 @@ const services = { }; async function main() { - const codec = process.env.RIVER_CODEC === 'binary' ? BinaryCodec : NaiveJsonCodec; + const codec = + process.env.RIVER_CODEC === 'binary' ? 
BinaryCodec : NaiveJsonCodec; const httpServer = http.createServer(); const port = await new Promise((resolve, reject) => { @@ -421,7 +425,9 @@ async function main() { }); const wss = new WebSocketServer({ server: httpServer }); - const serverTransport = new WebSocketServerTransport(wss, 'SERVER', { codec }); + const serverTransport = new WebSocketServerTransport(wss, 'SERVER', { + codec, + }); const _server = createServer(serverTransport, services); // Signal that the server is ready by printing the port diff --git a/python-client/tests/test_server_handshake.mjs b/python-client/tests/test_server_handshake.mjs index 12756031..1a113014 100644 --- a/python-client/tests/test_server_handshake.mjs +++ b/python-client/tests/test_server_handshake.mjs @@ -4501,7 +4501,10 @@ async function main() { }); }); const wss = new WebSocketServer({ server: httpServer }); - const serverTransport = new WebSocketServerTransport(wss, "HANDSHAKE_SERVER"); + const serverTransport = new WebSocketServerTransport( + wss, + "HANDSHAKE_SERVER" + ); const _server = createServer(serverTransport, services, { handshakeOptions: createServerHandshakeOptions( handshakeSchema, diff --git a/python-client/tests/test_server_handshake.ts b/python-client/tests/test_server_handshake.ts index 6ce07c06..c20b024a 100644 --- a/python-client/tests/test_server_handshake.ts +++ b/python-client/tests/test_server_handshake.ts @@ -7,15 +7,8 @@ import http from 'node:http'; import { WebSocketServer } from 'ws'; import { WebSocketServerTransport } from '../../transport/impls/ws/server'; -import { - createServer, - createServiceSchema, - Procedure, - Ok, -} from '../../router'; -import { - createServerHandshakeOptions, -} from '../../router/handshake'; +import { createServer, createServiceSchema, Procedure, Ok } from '../../router'; +import { createServerHandshakeOptions } from '../../router/handshake'; import { Type } from '@sinclair/typebox'; const ServiceSchema = createServiceSchema(); @@ -48,7 +41,10 @@ async 
function main() { }); const wss = new WebSocketServer({ server: httpServer }); - const serverTransport = new WebSocketServerTransport(wss, 'HANDSHAKE_SERVER'); + const serverTransport = new WebSocketServerTransport( + wss, + 'HANDSHAKE_SERVER', + ); const _server = createServer(serverTransport, services, { handshakeOptions: createServerHandshakeOptions( handshakeSchema, @@ -56,6 +52,7 @@ async function main() { if (metadata.token !== 'valid-token') { return 'REJECTED_BY_CUSTOM_HANDLER' as const; } + return {}; }, ), From a841abe45fb5e9b83faa838a83f46c374958de45 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 17:24:22 -0800 Subject: [PATCH 09/29] lint fix --- python-client/tests/test_server_handshake.mjs | 2065 +++++++++-------- 1 file changed, 1125 insertions(+), 940 deletions(-) diff --git a/python-client/tests/test_server_handshake.mjs b/python-client/tests/test_server_handshake.mjs index 1a113014..a1307bbb 100644 --- a/python-client/tests/test_server_handshake.mjs +++ b/python-client/tests/test_server_handshake.mjs @@ -1,9 +1,9 @@ // python-client/tests/test_server_handshake.ts -import http from "node:http"; -import { WebSocketServer } from "ws"; +import http from 'node:http'; +import { WebSocketServer } from 'ws'; // node_modules/nanoid/index.js -import { webcrypto as crypto } from "node:crypto"; +import { webcrypto as crypto } from 'node:crypto'; var POOL_SIZE_MULTIPLIER = 128; var pool; var poolOffset; @@ -19,19 +19,19 @@ function fillPool(bytes) { poolOffset += bytes; } function random(bytes) { - fillPool(bytes |= 0); + fillPool((bytes |= 0)); return pool.subarray(poolOffset - bytes, poolOffset); } function customRandom(alphabet2, defaultSize, getRandom) { - let mask = (2 << 31 - Math.clz32(alphabet2.length - 1 | 1)) - 1; - let step = Math.ceil(1.6 * mask * defaultSize / alphabet2.length); + let mask = (2 << (31 - Math.clz32((alphabet2.length - 1) | 1))) - 1; + let step = Math.ceil((1.6 * mask * defaultSize) / alphabet2.length); return (size 
= defaultSize) => { - let id = ""; + let id = ''; while (true) { let bytes = getRandom(step); let i = step; while (i--) { - id += alphabet2[bytes[i] & mask] || ""; + id += alphabet2[bytes[i] & mask] || ''; if (id.length >= size) return id; } } @@ -43,7 +43,7 @@ function customAlphabet(alphabet2, size = 21) { // transport/id.ts var alphabet = customAlphabet( - "1234567890abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVXYZ" + '1234567890abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVXYZ', ); var generateId = () => alphabet(12); @@ -60,7 +60,7 @@ var Connection = class { const spanContext = this.telemetry.span.spanContext(); metadata.telemetry = { traceId: spanContext.traceId, - spanId: spanContext.spanId + spanId: spanContext.spanId, }; } return metadata; @@ -143,7 +143,7 @@ var WebSocketConnection = class extends Connection { super(); this.ws = ws; this.extras = extras; - this.ws.binaryType = "arraybuffer"; + this.ws.binaryType = 'arraybuffer'; let didError = false; this.ws.onerror = () => { didError = true; @@ -173,10 +173,10 @@ var WebSocketConnection = class extends Connection { }; // node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js -var _globalThis = typeof globalThis === "object" ? globalThis : global; +var _globalThis = typeof globalThis === 'object' ? 
globalThis : global; // node_modules/@opentelemetry/api/build/esm/version.js -var VERSION = "1.8.0"; +var VERSION = '1.8.0'; // node_modules/@opentelemetry/api/build/esm/internal/semver.js var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/; @@ -185,7 +185,7 @@ function _makeCompatibilityCheck(ownVersion) { var rejectedVersions = /* @__PURE__ */ new Set(); var myVersionMatch = ownVersion.match(re); if (!myVersionMatch) { - return function() { + return function () { return false; }; } @@ -193,7 +193,7 @@ function _makeCompatibilityCheck(ownVersion) { major: +myVersionMatch[1], minor: +myVersionMatch[2], patch: +myVersionMatch[3], - prerelease: myVersionMatch[4] + prerelease: myVersionMatch[4], }; if (ownVersionParsed.prerelease != null) { return function isExactmatch(globalVersion) { @@ -223,7 +223,7 @@ function _makeCompatibilityCheck(ownVersion) { major: +globalVersionMatch[1], minor: +globalVersionMatch[2], patch: +globalVersionMatch[3], - prerelease: globalVersionMatch[4] + prerelease: globalVersionMatch[4], }; if (globalVersionParsed.prerelease != null) { return _reject(globalVersion); @@ -232,7 +232,10 @@ function _makeCompatibilityCheck(ownVersion) { return _reject(globalVersion); } if (ownVersionParsed.major === 0) { - if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) { + if ( + ownVersionParsed.minor === globalVersionParsed.minor && + ownVersionParsed.patch <= globalVersionParsed.patch + ) { return _accept(globalVersion); } return _reject(globalVersion); @@ -246,41 +249,70 @@ function _makeCompatibilityCheck(ownVersion) { var isCompatible = _makeCompatibilityCheck(VERSION); // node_modules/@opentelemetry/api/build/esm/internal/global-utils.js -var major = VERSION.split(".")[0]; -var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major); +var major = VERSION.split('.')[0]; +var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for('opentelemetry.js.api.' 
+ major); var _global = _globalThis; function registerGlobal(type, instance, diag2, allowOverride) { var _a; if (allowOverride === void 0) { allowOverride = false; } - var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : { - version: VERSION - }; + var api = (_global[GLOBAL_OPENTELEMETRY_API_KEY] = + (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 + ? _a + : { + version: VERSION, + }); if (!allowOverride && api[type]) { - var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type); + var err = new Error( + '@opentelemetry/api: Attempted duplicate registration of API: ' + type, + ); diag2.error(err.stack || err.message); return false; } if (api.version !== VERSION) { - var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION); + var err = new Error( + '@opentelemetry/api: Registration of version v' + + api.version + + ' for ' + + type + + ' does not match previously registered API v' + + VERSION, + ); diag2.error(err.stack || err.message); return false; } api[type] = instance; - diag2.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + "."); + diag2.debug( + '@opentelemetry/api: Registered a global for ' + + type + + ' v' + + VERSION + + '.', + ); return true; } function getGlobal(type) { var _a, _b; - var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version; + var globalVersion = + (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 + ? void 0 + : _a.version; if (!globalVersion || !isCompatible(globalVersion)) { return; } - return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? void 0 : _b[type]; + return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 + ? 
void 0 + : _b[type]; } function unregisterGlobal(type, diag2) { - diag2.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + "."); + diag2.debug( + '@opentelemetry/api: Unregistering a global for ' + + type + + ' v' + + VERSION + + '.', + ); var api = _global[GLOBAL_OPENTELEMETRY_API_KEY]; if (api) { delete api[type]; @@ -288,78 +320,81 @@ function unregisterGlobal(type, diag2) { } // node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js -var __read = function(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; +var __read = function (o, n) { + var m = typeof Symbol === 'function' && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), r, ar = [], e; + var i = m.call(o), + r, + ar = [], + e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i["return"])) m.call(i); + if (r && !r.done && (m = i['return'])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __spreadArray = function(to, from, pack) { - if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; +var __spreadArray = function (to, from, pack) { + if (pack || arguments.length === 2) + for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; + } } - } return to.concat(ar || Array.prototype.slice.call(from)); }; -var DiagComponentLogger = ( +var DiagComponentLogger = /** @class */ - function() { + (function () { function DiagComponentLogger2(props) { - this._namespace = props.namespace || "DiagComponentLogger"; + this._namespace = props.namespace || 'DiagComponentLogger'; } - DiagComponentLogger2.prototype.debug = function() { + DiagComponentLogger2.prototype.debug = function () { var args = []; 
for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy("debug", this._namespace, args); + return logProxy('debug', this._namespace, args); }; - DiagComponentLogger2.prototype.error = function() { + DiagComponentLogger2.prototype.error = function () { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy("error", this._namespace, args); + return logProxy('error', this._namespace, args); }; - DiagComponentLogger2.prototype.info = function() { + DiagComponentLogger2.prototype.info = function () { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy("info", this._namespace, args); + return logProxy('info', this._namespace, args); }; - DiagComponentLogger2.prototype.warn = function() { + DiagComponentLogger2.prototype.warn = function () { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy("warn", this._namespace, args); + return logProxy('warn', this._namespace, args); }; - DiagComponentLogger2.prototype.verbose = function() { + DiagComponentLogger2.prototype.verbose = function () { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy("verbose", this._namespace, args); + return logProxy('verbose', this._namespace, args); }; return DiagComponentLogger2; - }() -); + })(); function logProxy(funcName, namespace, args) { - var logger = getGlobal("diag"); + var logger = getGlobal('diag'); if (!logger) { return; } @@ -369,14 +404,14 @@ function logProxy(funcName, namespace, args) { // node_modules/@opentelemetry/api/build/esm/diag/types.js var DiagLogLevel; -(function(DiagLogLevel2) { - DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE"; - DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR"; - DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN"; - DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO"; - 
DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG"; - DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE"; - DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL"; +(function (DiagLogLevel2) { + DiagLogLevel2[(DiagLogLevel2['NONE'] = 0)] = 'NONE'; + DiagLogLevel2[(DiagLogLevel2['ERROR'] = 30)] = 'ERROR'; + DiagLogLevel2[(DiagLogLevel2['WARN'] = 50)] = 'WARN'; + DiagLogLevel2[(DiagLogLevel2['INFO'] = 60)] = 'INFO'; + DiagLogLevel2[(DiagLogLevel2['DEBUG'] = 70)] = 'DEBUG'; + DiagLogLevel2[(DiagLogLevel2['VERBOSE'] = 80)] = 'VERBOSE'; + DiagLogLevel2[(DiagLogLevel2['ALL'] = 9999)] = 'ALL'; })(DiagLogLevel || (DiagLogLevel = {})); // node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js @@ -389,172 +424,201 @@ function createLogLevelDiagLogger(maxLevel, logger) { logger = logger || {}; function _filterFunc(funcName, theLevel) { var theFunc = logger[funcName]; - if (typeof theFunc === "function" && maxLevel >= theLevel) { + if (typeof theFunc === 'function' && maxLevel >= theLevel) { return theFunc.bind(logger); } - return function() { - }; + return function () {}; } return { - error: _filterFunc("error", DiagLogLevel.ERROR), - warn: _filterFunc("warn", DiagLogLevel.WARN), - info: _filterFunc("info", DiagLogLevel.INFO), - debug: _filterFunc("debug", DiagLogLevel.DEBUG), - verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE) + error: _filterFunc('error', DiagLogLevel.ERROR), + warn: _filterFunc('warn', DiagLogLevel.WARN), + info: _filterFunc('info', DiagLogLevel.INFO), + debug: _filterFunc('debug', DiagLogLevel.DEBUG), + verbose: _filterFunc('verbose', DiagLogLevel.VERBOSE), }; } // node_modules/@opentelemetry/api/build/esm/api/diag.js -var __read2 = function(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; +var __read2 = function (o, n) { + var m = typeof Symbol === 'function' && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), r, ar = [], e; + var i = m.call(o), + r, + ar = [], + e; try { while ((n === void 0 || 
n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i["return"])) m.call(i); + if (r && !r.done && (m = i['return'])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __spreadArray2 = function(to, from, pack) { - if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; +var __spreadArray2 = function (to, from, pack) { + if (pack || arguments.length === 2) + for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; + } } - } return to.concat(ar || Array.prototype.slice.call(from)); }; -var API_NAME = "diag"; -var DiagAPI = ( +var API_NAME = 'diag'; +var DiagAPI = /** @class */ - function() { + (function () { function DiagAPI2() { function _logProxy(funcName) { - return function() { + return function () { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - var logger = getGlobal("diag"); - if (!logger) - return; - return logger[funcName].apply(logger, __spreadArray2([], __read2(args), false)); + var logger = getGlobal('diag'); + if (!logger) return; + return logger[funcName].apply( + logger, + __spreadArray2([], __read2(args), false), + ); }; } var self = this; - var setLogger = function(logger, optionsOrLogLevel) { + var setLogger = function (logger, optionsOrLogLevel) { var _a, _b, _c; if (optionsOrLogLevel === void 0) { optionsOrLogLevel = { logLevel: DiagLogLevel.INFO }; } if (logger === self) { - var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation"); - self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message); + var err = new Error( + 'Cannot use diag as the logger for itself. 
Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation', + ); + self.error( + (_a = err.stack) !== null && _a !== void 0 ? _a : err.message, + ); return false; } - if (typeof optionsOrLogLevel === "number") { + if (typeof optionsOrLogLevel === 'number') { optionsOrLogLevel = { - logLevel: optionsOrLogLevel + logLevel: optionsOrLogLevel, }; } - var oldLogger = getGlobal("diag"); - var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger); + var oldLogger = getGlobal('diag'); + var newLogger = createLogLevelDiagLogger( + (_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 + ? _b + : DiagLogLevel.INFO, + logger, + ); if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) { - var stack = (_c = new Error().stack) !== null && _c !== void 0 ? _c : ""; - oldLogger.warn("Current logger will be overwritten from " + stack); - newLogger.warn("Current logger will overwrite one already registered from " + stack); + var stack = + (_c = new Error().stack) !== null && _c !== void 0 + ? 
_c + : ''; + oldLogger.warn('Current logger will be overwritten from ' + stack); + newLogger.warn( + 'Current logger will overwrite one already registered from ' + + stack, + ); } - return registerGlobal("diag", newLogger, self, true); + return registerGlobal('diag', newLogger, self, true); }; self.setLogger = setLogger; - self.disable = function() { + self.disable = function () { unregisterGlobal(API_NAME, self); }; - self.createComponentLogger = function(options) { + self.createComponentLogger = function (options) { return new DiagComponentLogger(options); }; - self.verbose = _logProxy("verbose"); - self.debug = _logProxy("debug"); - self.info = _logProxy("info"); - self.warn = _logProxy("warn"); - self.error = _logProxy("error"); + self.verbose = _logProxy('verbose'); + self.debug = _logProxy('debug'); + self.info = _logProxy('info'); + self.warn = _logProxy('warn'); + self.error = _logProxy('error'); } - DiagAPI2.instance = function() { + DiagAPI2.instance = function () { if (!this._instance) { this._instance = new DiagAPI2(); } return this._instance; }; return DiagAPI2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/baggage/internal/baggage-impl.js -var __read3 = function(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; +var __read3 = function (o, n) { + var m = typeof Symbol === 'function' && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), r, ar = [], e; + var i = m.call(o), + r, + ar = [], + e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i["return"])) m.call(i); + if (r && !r.done && (m = i['return'])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __values = function(o) { - var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0; +var __values = function (o) { + var s = typeof Symbol === 'function' && Symbol.iterator, + m = s && o[s], + i = 0; if (m) 
return m.call(o); - if (o && typeof o.length === "number") return { - next: function() { - if (o && i >= o.length) o = void 0; - return { value: o && o[i++], done: !o }; - } - }; - throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined."); + if (o && typeof o.length === 'number') + return { + next: function () { + if (o && i >= o.length) o = void 0; + return { value: o && o[i++], done: !o }; + }, + }; + throw new TypeError( + s ? 'Object is not iterable.' : 'Symbol.iterator is not defined.', + ); }; -var BaggageImpl = ( +var BaggageImpl = /** @class */ - function() { + (function () { function BaggageImpl2(entries) { this._entries = entries ? new Map(entries) : /* @__PURE__ */ new Map(); } - BaggageImpl2.prototype.getEntry = function(key) { + BaggageImpl2.prototype.getEntry = function (key) { var entry = this._entries.get(key); if (!entry) { return void 0; } return Object.assign({}, entry); }; - BaggageImpl2.prototype.getAllEntries = function() { - return Array.from(this._entries.entries()).map(function(_a) { - var _b = __read3(_a, 2), k = _b[0], v = _b[1]; + BaggageImpl2.prototype.getAllEntries = function () { + return Array.from(this._entries.entries()).map(function (_a) { + var _b = __read3(_a, 2), + k = _b[0], + v = _b[1]; return [k, v]; }); }; - BaggageImpl2.prototype.setEntry = function(key, entry) { + BaggageImpl2.prototype.setEntry = function (key, entry) { var newBaggage = new BaggageImpl2(this._entries); newBaggage._entries.set(key, entry); return newBaggage; }; - BaggageImpl2.prototype.removeEntry = function(key) { + BaggageImpl2.prototype.removeEntry = function (key) { var newBaggage = new BaggageImpl2(this._entries); newBaggage._entries.delete(key); return newBaggage; }; - BaggageImpl2.prototype.removeEntries = function() { + BaggageImpl2.prototype.removeEntries = function () { var e_1, _a; var keys = []; for (var _i = 0; _i < arguments.length; _i++) { @@ -562,7 +626,11 @@ var BaggageImpl = ( } var newBaggage = new 
BaggageImpl2(this._entries); try { - for (var keys_1 = __values(keys), keys_1_1 = keys_1.next(); !keys_1_1.done; keys_1_1 = keys_1.next()) { + for ( + var keys_1 = __values(keys), keys_1_1 = keys_1.next(); + !keys_1_1.done; + keys_1_1 = keys_1.next() + ) { var key = keys_1_1.value; newBaggage._entries.delete(key); } @@ -570,19 +638,19 @@ var BaggageImpl = ( e_1 = { error: e_1_1 }; } finally { try { - if (keys_1_1 && !keys_1_1.done && (_a = keys_1.return)) _a.call(keys_1); + if (keys_1_1 && !keys_1_1.done && (_a = keys_1.return)) + _a.call(keys_1); } finally { if (e_1) throw e_1.error; } } return newBaggage; }; - BaggageImpl2.prototype.clear = function() { + BaggageImpl2.prototype.clear = function () { return new BaggageImpl2(); }; return BaggageImpl2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/baggage/utils.js var diag = DiagAPI.instance(); @@ -597,235 +665,243 @@ function createBaggage(entries) { function createContextKey(description) { return Symbol.for(description); } -var BaseContext = ( +var BaseContext = /** @class */ - /* @__PURE__ */ function() { + /* @__PURE__ */ (function () { function BaseContext2(parentContext) { var self = this; - self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map(); - self.getValue = function(key) { + self._currentContext = parentContext + ? 
new Map(parentContext) + : /* @__PURE__ */ new Map(); + self.getValue = function (key) { return self._currentContext.get(key); }; - self.setValue = function(key, value) { + self.setValue = function (key, value) { var context2 = new BaseContext2(self._currentContext); context2._currentContext.set(key, value); return context2; }; - self.deleteValue = function(key) { + self.deleteValue = function (key) { var context2 = new BaseContext2(self._currentContext); context2._currentContext.delete(key); return context2; }; } return BaseContext2; - }() -); + })(); var ROOT_CONTEXT = new BaseContext(); // node_modules/@opentelemetry/api/build/esm/propagation/TextMapPropagator.js var defaultTextMapGetter = { - get: function(carrier, key) { + get: function (carrier, key) { if (carrier == null) { return void 0; } return carrier[key]; }, - keys: function(carrier) { + keys: function (carrier) { if (carrier == null) { return []; } return Object.keys(carrier); - } + }, }; var defaultTextMapSetter = { - set: function(carrier, key, value) { + set: function (carrier, key, value) { if (carrier == null) { return; } carrier[key] = value; - } + }, }; // node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js -var __read4 = function(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; +var __read4 = function (o, n) { + var m = typeof Symbol === 'function' && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), r, ar = [], e; + var i = m.call(o), + r, + ar = [], + e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i["return"])) m.call(i); + if (r && !r.done && (m = i['return'])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __spreadArray3 = function(to, from, pack) { - if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = 
Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; +var __spreadArray3 = function (to, from, pack) { + if (pack || arguments.length === 2) + for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; + } } - } return to.concat(ar || Array.prototype.slice.call(from)); }; -var NoopContextManager = ( +var NoopContextManager = /** @class */ - function() { - function NoopContextManager2() { - } - NoopContextManager2.prototype.active = function() { + (function () { + function NoopContextManager2() {} + NoopContextManager2.prototype.active = function () { return ROOT_CONTEXT; }; - NoopContextManager2.prototype.with = function(_context, fn, thisArg) { + NoopContextManager2.prototype.with = function (_context, fn, thisArg) { var args = []; for (var _i = 3; _i < arguments.length; _i++) { args[_i - 3] = arguments[_i]; } return fn.call.apply(fn, __spreadArray3([thisArg], __read4(args), false)); }; - NoopContextManager2.prototype.bind = function(_context, target) { + NoopContextManager2.prototype.bind = function (_context, target) { return target; }; - NoopContextManager2.prototype.enable = function() { + NoopContextManager2.prototype.enable = function () { return this; }; - NoopContextManager2.prototype.disable = function() { + NoopContextManager2.prototype.disable = function () { return this; }; return NoopContextManager2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/api/context.js -var __read5 = function(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; +var __read5 = function (o, n) { + var m = typeof Symbol === 'function' && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), r, ar = [], e; + var i = m.call(o), + r, + ar = [], + e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i["return"])) m.call(i); + if (r && 
!r.done && (m = i['return'])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __spreadArray4 = function(to, from, pack) { - if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; +var __spreadArray4 = function (to, from, pack) { + if (pack || arguments.length === 2) + for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; + } } - } return to.concat(ar || Array.prototype.slice.call(from)); }; -var API_NAME2 = "context"; +var API_NAME2 = 'context'; var NOOP_CONTEXT_MANAGER = new NoopContextManager(); -var ContextAPI = ( +var ContextAPI = /** @class */ - function() { - function ContextAPI2() { - } - ContextAPI2.getInstance = function() { + (function () { + function ContextAPI2() {} + ContextAPI2.getInstance = function () { if (!this._instance) { this._instance = new ContextAPI2(); } return this._instance; }; - ContextAPI2.prototype.setGlobalContextManager = function(contextManager) { + ContextAPI2.prototype.setGlobalContextManager = function (contextManager) { return registerGlobal(API_NAME2, contextManager, DiagAPI.instance()); }; - ContextAPI2.prototype.active = function() { + ContextAPI2.prototype.active = function () { return this._getContextManager().active(); }; - ContextAPI2.prototype.with = function(context2, fn, thisArg) { + ContextAPI2.prototype.with = function (context2, fn, thisArg) { var _a; var args = []; for (var _i = 3; _i < arguments.length; _i++) { args[_i - 3] = arguments[_i]; } - return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read5(args), false)); + return (_a = this._getContextManager()).with.apply( + _a, + __spreadArray4([context2, fn, thisArg], __read5(args), false), + ); }; - ContextAPI2.prototype.bind = function(context2, target) { + 
ContextAPI2.prototype.bind = function (context2, target) { return this._getContextManager().bind(context2, target); }; - ContextAPI2.prototype._getContextManager = function() { + ContextAPI2.prototype._getContextManager = function () { return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER; }; - ContextAPI2.prototype.disable = function() { + ContextAPI2.prototype.disable = function () { this._getContextManager().disable(); unregisterGlobal(API_NAME2, DiagAPI.instance()); }; return ContextAPI2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/trace/trace_flags.js var TraceFlags; -(function(TraceFlags2) { - TraceFlags2[TraceFlags2["NONE"] = 0] = "NONE"; - TraceFlags2[TraceFlags2["SAMPLED"] = 1] = "SAMPLED"; +(function (TraceFlags2) { + TraceFlags2[(TraceFlags2['NONE'] = 0)] = 'NONE'; + TraceFlags2[(TraceFlags2['SAMPLED'] = 1)] = 'SAMPLED'; })(TraceFlags || (TraceFlags = {})); // node_modules/@opentelemetry/api/build/esm/trace/invalid-span-constants.js -var INVALID_SPANID = "0000000000000000"; -var INVALID_TRACEID = "00000000000000000000000000000000"; +var INVALID_SPANID = '0000000000000000'; +var INVALID_TRACEID = '00000000000000000000000000000000'; var INVALID_SPAN_CONTEXT = { traceId: INVALID_TRACEID, spanId: INVALID_SPANID, - traceFlags: TraceFlags.NONE + traceFlags: TraceFlags.NONE, }; // node_modules/@opentelemetry/api/build/esm/trace/NonRecordingSpan.js -var NonRecordingSpan = ( +var NonRecordingSpan = /** @class */ - function() { + (function () { function NonRecordingSpan2(_spanContext) { if (_spanContext === void 0) { _spanContext = INVALID_SPAN_CONTEXT; } this._spanContext = _spanContext; } - NonRecordingSpan2.prototype.spanContext = function() { + NonRecordingSpan2.prototype.spanContext = function () { return this._spanContext; }; - NonRecordingSpan2.prototype.setAttribute = function(_key, _value) { + NonRecordingSpan2.prototype.setAttribute = function (_key, _value) { return this; }; - NonRecordingSpan2.prototype.setAttributes = 
function(_attributes) { + NonRecordingSpan2.prototype.setAttributes = function (_attributes) { return this; }; - NonRecordingSpan2.prototype.addEvent = function(_name, _attributes) { + NonRecordingSpan2.prototype.addEvent = function (_name, _attributes) { return this; }; - NonRecordingSpan2.prototype.setStatus = function(_status) { + NonRecordingSpan2.prototype.setStatus = function (_status) { return this; }; - NonRecordingSpan2.prototype.updateName = function(_name) { + NonRecordingSpan2.prototype.updateName = function (_name) { return this; }; - NonRecordingSpan2.prototype.end = function(_endTime) { - }; - NonRecordingSpan2.prototype.isRecording = function() { + NonRecordingSpan2.prototype.end = function (_endTime) {}; + NonRecordingSpan2.prototype.isRecording = function () { return false; }; - NonRecordingSpan2.prototype.recordException = function(_exception, _time) { - }; + NonRecordingSpan2.prototype.recordException = function ( + _exception, + _time, + ) {}; return NonRecordingSpan2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/trace/context-utils.js -var SPAN_KEY = createContextKey("OpenTelemetry Context Key SPAN"); +var SPAN_KEY = createContextKey('OpenTelemetry Context Key SPAN'); function getSpan(context2) { return context2.getValue(SPAN_KEY) || void 0; } @@ -843,7 +919,9 @@ function setSpanContext(context2, spanContext) { } function getSpanContext(context2) { var _a; - return (_a = getSpan(context2)) === null || _a === void 0 ? void 0 : _a.spanContext(); + return (_a = getSpan(context2)) === null || _a === void 0 + ? 
void 0 + : _a.spanContext(); } // node_modules/@opentelemetry/api/build/esm/trace/spancontext-utils.js @@ -856,7 +934,9 @@ function isValidSpanId(spanId) { return VALID_SPANID_REGEX.test(spanId) && spanId !== INVALID_SPANID; } function isSpanContextValid(spanContext) { - return isValidTraceId(spanContext.traceId) && isValidSpanId(spanContext.spanId); + return ( + isValidTraceId(spanContext.traceId) && isValidSpanId(spanContext.spanId) + ); } function wrapSpanContext(spanContext) { return new NonRecordingSpan(spanContext); @@ -864,27 +944,31 @@ function wrapSpanContext(spanContext) { // node_modules/@opentelemetry/api/build/esm/trace/NoopTracer.js var contextApi = ContextAPI.getInstance(); -var NoopTracer = ( +var NoopTracer = /** @class */ - function() { - function NoopTracer2() { - } - NoopTracer2.prototype.startSpan = function(name, options, context2) { + (function () { + function NoopTracer2() {} + NoopTracer2.prototype.startSpan = function (name, options, context2) { if (context2 === void 0) { context2 = contextApi.active(); } - var root = Boolean(options === null || options === void 0 ? void 0 : options.root); + var root = Boolean( + options === null || options === void 0 ? void 0 : options.root, + ); if (root) { return new NonRecordingSpan(); } var parentFromContext = context2 && getSpanContext(context2); - if (isSpanContext(parentFromContext) && isSpanContextValid(parentFromContext)) { + if ( + isSpanContext(parentFromContext) && + isSpanContextValid(parentFromContext) + ) { return new NonRecordingSpan(parentFromContext); } else { return new NonRecordingSpan(); } }; - NoopTracer2.prototype.startActiveSpan = function(name, arg2, arg3, arg4) { + NoopTracer2.prototype.startActiveSpan = function (name, arg2, arg3, arg4) { var opts; var ctx; var fn; @@ -900,41 +984,55 @@ var NoopTracer = ( ctx = arg3; fn = arg4; } - var parentContext = ctx !== null && ctx !== void 0 ? ctx : contextApi.active(); + var parentContext = + ctx !== null && ctx !== void 0 ? 
ctx : contextApi.active(); var span = this.startSpan(name, opts, parentContext); var contextWithSpanSet = setSpan(parentContext, span); return contextApi.with(contextWithSpanSet, fn, void 0, span); }; return NoopTracer2; - }() -); + })(); function isSpanContext(spanContext) { - return typeof spanContext === "object" && typeof spanContext["spanId"] === "string" && typeof spanContext["traceId"] === "string" && typeof spanContext["traceFlags"] === "number"; + return ( + typeof spanContext === 'object' && + typeof spanContext['spanId'] === 'string' && + typeof spanContext['traceId'] === 'string' && + typeof spanContext['traceFlags'] === 'number' + ); } // node_modules/@opentelemetry/api/build/esm/trace/ProxyTracer.js var NOOP_TRACER = new NoopTracer(); -var ProxyTracer = ( +var ProxyTracer = /** @class */ - function() { + (function () { function ProxyTracer2(_provider, name, version2, options) { this._provider = _provider; this.name = name; this.version = version2; this.options = options; } - ProxyTracer2.prototype.startSpan = function(name, options, context2) { + ProxyTracer2.prototype.startSpan = function (name, options, context2) { return this._getTracer().startSpan(name, options, context2); }; - ProxyTracer2.prototype.startActiveSpan = function(_name, _options, _context, _fn) { + ProxyTracer2.prototype.startActiveSpan = function ( + _name, + _options, + _context, + _fn, + ) { var tracer = this._getTracer(); return Reflect.apply(tracer.startActiveSpan, tracer, arguments); }; - ProxyTracer2.prototype._getTracer = function() { + ProxyTracer2.prototype._getTracer = function () { if (this._delegate) { return this._delegate; } - var tracer = this._provider.getDelegateTracer(this.name, this.version, this.options); + var tracer = this._provider.getDelegateTracer( + this.name, + this.version, + this.options, + ); if (!tracer) { return NOOP_TRACER; } @@ -942,89 +1040,100 @@ var ProxyTracer = ( return this._delegate; }; return ProxyTracer2; - }() -); + })(); // 
node_modules/@opentelemetry/api/build/esm/trace/NoopTracerProvider.js -var NoopTracerProvider = ( +var NoopTracerProvider = /** @class */ - function() { - function NoopTracerProvider2() { - } - NoopTracerProvider2.prototype.getTracer = function(_name, _version, _options) { + (function () { + function NoopTracerProvider2() {} + NoopTracerProvider2.prototype.getTracer = function ( + _name, + _version, + _options, + ) { return new NoopTracer(); }; return NoopTracerProvider2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/trace/ProxyTracerProvider.js var NOOP_TRACER_PROVIDER = new NoopTracerProvider(); -var ProxyTracerProvider = ( +var ProxyTracerProvider = /** @class */ - function() { - function ProxyTracerProvider2() { - } - ProxyTracerProvider2.prototype.getTracer = function(name, version2, options) { + (function () { + function ProxyTracerProvider2() {} + ProxyTracerProvider2.prototype.getTracer = function ( + name, + version2, + options, + ) { var _a; - return (_a = this.getDelegateTracer(name, version2, options)) !== null && _a !== void 0 ? _a : new ProxyTracer(this, name, version2, options); + return (_a = this.getDelegateTracer(name, version2, options)) !== null && + _a !== void 0 + ? _a + : new ProxyTracer(this, name, version2, options); }; - ProxyTracerProvider2.prototype.getDelegate = function() { + ProxyTracerProvider2.prototype.getDelegate = function () { var _a; - return (_a = this._delegate) !== null && _a !== void 0 ? _a : NOOP_TRACER_PROVIDER; + return (_a = this._delegate) !== null && _a !== void 0 + ? 
_a + : NOOP_TRACER_PROVIDER; }; - ProxyTracerProvider2.prototype.setDelegate = function(delegate) { + ProxyTracerProvider2.prototype.setDelegate = function (delegate) { this._delegate = delegate; }; - ProxyTracerProvider2.prototype.getDelegateTracer = function(name, version2, options) { + ProxyTracerProvider2.prototype.getDelegateTracer = function ( + name, + version2, + options, + ) { var _a; - return (_a = this._delegate) === null || _a === void 0 ? void 0 : _a.getTracer(name, version2, options); + return (_a = this._delegate) === null || _a === void 0 + ? void 0 + : _a.getTracer(name, version2, options); }; return ProxyTracerProvider2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/trace/span_kind.js var SpanKind; -(function(SpanKind2) { - SpanKind2[SpanKind2["INTERNAL"] = 0] = "INTERNAL"; - SpanKind2[SpanKind2["SERVER"] = 1] = "SERVER"; - SpanKind2[SpanKind2["CLIENT"] = 2] = "CLIENT"; - SpanKind2[SpanKind2["PRODUCER"] = 3] = "PRODUCER"; - SpanKind2[SpanKind2["CONSUMER"] = 4] = "CONSUMER"; +(function (SpanKind2) { + SpanKind2[(SpanKind2['INTERNAL'] = 0)] = 'INTERNAL'; + SpanKind2[(SpanKind2['SERVER'] = 1)] = 'SERVER'; + SpanKind2[(SpanKind2['CLIENT'] = 2)] = 'CLIENT'; + SpanKind2[(SpanKind2['PRODUCER'] = 3)] = 'PRODUCER'; + SpanKind2[(SpanKind2['CONSUMER'] = 4)] = 'CONSUMER'; })(SpanKind || (SpanKind = {})); // node_modules/@opentelemetry/api/build/esm/trace/status.js var SpanStatusCode; -(function(SpanStatusCode2) { - SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET"; - SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK"; - SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR"; +(function (SpanStatusCode2) { + SpanStatusCode2[(SpanStatusCode2['UNSET'] = 0)] = 'UNSET'; + SpanStatusCode2[(SpanStatusCode2['OK'] = 1)] = 'OK'; + SpanStatusCode2[(SpanStatusCode2['ERROR'] = 2)] = 'ERROR'; })(SpanStatusCode || (SpanStatusCode = {})); // node_modules/@opentelemetry/api/build/esm/context-api.js var context = ContextAPI.getInstance(); // 
node_modules/@opentelemetry/api/build/esm/propagation/NoopTextMapPropagator.js -var NoopTextMapPropagator = ( +var NoopTextMapPropagator = /** @class */ - function() { - function NoopTextMapPropagator2() { - } - NoopTextMapPropagator2.prototype.inject = function(_context, _carrier) { - }; - NoopTextMapPropagator2.prototype.extract = function(context2, _carrier) { + (function () { + function NoopTextMapPropagator2() {} + NoopTextMapPropagator2.prototype.inject = function (_context, _carrier) {}; + NoopTextMapPropagator2.prototype.extract = function (context2, _carrier) { return context2; }; - NoopTextMapPropagator2.prototype.fields = function() { + NoopTextMapPropagator2.prototype.fields = function () { return []; }; return NoopTextMapPropagator2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/baggage/context-helpers.js -var BAGGAGE_KEY = createContextKey("OpenTelemetry Baggage Key"); +var BAGGAGE_KEY = createContextKey('OpenTelemetry Baggage Key'); function getBaggage(context2) { return context2.getValue(BAGGAGE_KEY) || void 0; } @@ -1039,11 +1148,11 @@ function deleteBaggage(context2) { } // node_modules/@opentelemetry/api/build/esm/api/propagation.js -var API_NAME3 = "propagation"; +var API_NAME3 = 'propagation'; var NOOP_TEXT_MAP_PROPAGATOR = new NoopTextMapPropagator(); -var PropagationAPI = ( +var PropagationAPI = /** @class */ - function() { + (function () { function PropagationAPI2() { this.createBaggage = createBaggage; this.getBaggage = getBaggage; @@ -1051,48 +1160,47 @@ var PropagationAPI = ( this.setBaggage = setBaggage; this.deleteBaggage = deleteBaggage; } - PropagationAPI2.getInstance = function() { + PropagationAPI2.getInstance = function () { if (!this._instance) { this._instance = new PropagationAPI2(); } return this._instance; }; - PropagationAPI2.prototype.setGlobalPropagator = function(propagator) { + PropagationAPI2.prototype.setGlobalPropagator = function (propagator) { return registerGlobal(API_NAME3, propagator, 
DiagAPI.instance()); }; - PropagationAPI2.prototype.inject = function(context2, carrier, setter) { + PropagationAPI2.prototype.inject = function (context2, carrier, setter) { if (setter === void 0) { setter = defaultTextMapSetter; } return this._getGlobalPropagator().inject(context2, carrier, setter); }; - PropagationAPI2.prototype.extract = function(context2, carrier, getter) { + PropagationAPI2.prototype.extract = function (context2, carrier, getter) { if (getter === void 0) { getter = defaultTextMapGetter; } return this._getGlobalPropagator().extract(context2, carrier, getter); }; - PropagationAPI2.prototype.fields = function() { + PropagationAPI2.prototype.fields = function () { return this._getGlobalPropagator().fields(); }; - PropagationAPI2.prototype.disable = function() { + PropagationAPI2.prototype.disable = function () { unregisterGlobal(API_NAME3, DiagAPI.instance()); }; - PropagationAPI2.prototype._getGlobalPropagator = function() { + PropagationAPI2.prototype._getGlobalPropagator = function () { return getGlobal(API_NAME3) || NOOP_TEXT_MAP_PROPAGATOR; }; return PropagationAPI2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/propagation-api.js var propagation = PropagationAPI.getInstance(); // node_modules/@opentelemetry/api/build/esm/api/trace.js -var API_NAME4 = "trace"; -var TraceAPI = ( +var API_NAME4 = 'trace'; +var TraceAPI = /** @class */ - function() { + (function () { function TraceAPI2() { this._proxyTracerProvider = new ProxyTracerProvider(); this.wrapSpanContext = wrapSpanContext; @@ -1104,69 +1212,73 @@ var TraceAPI = ( this.setSpan = setSpan; this.setSpanContext = setSpanContext; } - TraceAPI2.getInstance = function() { + TraceAPI2.getInstance = function () { if (!this._instance) { this._instance = new TraceAPI2(); } return this._instance; }; - TraceAPI2.prototype.setGlobalTracerProvider = function(provider) { - var success = registerGlobal(API_NAME4, this._proxyTracerProvider, DiagAPI.instance()); + 
TraceAPI2.prototype.setGlobalTracerProvider = function (provider) { + var success = registerGlobal( + API_NAME4, + this._proxyTracerProvider, + DiagAPI.instance(), + ); if (success) { this._proxyTracerProvider.setDelegate(provider); } return success; }; - TraceAPI2.prototype.getTracerProvider = function() { + TraceAPI2.prototype.getTracerProvider = function () { return getGlobal(API_NAME4) || this._proxyTracerProvider; }; - TraceAPI2.prototype.getTracer = function(name, version2) { + TraceAPI2.prototype.getTracer = function (name, version2) { return this.getTracerProvider().getTracer(name, version2); }; - TraceAPI2.prototype.disable = function() { + TraceAPI2.prototype.disable = function () { unregisterGlobal(API_NAME4, DiagAPI.instance()); this._proxyTracerProvider = new ProxyTracerProvider(); }; return TraceAPI2; - }() -); + })(); // node_modules/@opentelemetry/api/build/esm/trace-api.js var trace = TraceAPI.getInstance(); // transport/message.ts -import { Type } from "@sinclair/typebox"; -var TransportMessageSchema = (t) => Type.Object({ - id: Type.String(), - from: Type.String(), - to: Type.String(), - seq: Type.Integer(), - ack: Type.Integer(), - serviceName: Type.Optional(Type.String()), - procedureName: Type.Optional(Type.String()), - streamId: Type.String(), - controlFlags: Type.Integer(), - tracing: Type.Optional( - Type.Object({ - traceparent: Type.String(), - tracestate: Type.String() - }) - ), - payload: t -}); +import { Type } from '@sinclair/typebox'; +var TransportMessageSchema = (t) => + Type.Object({ + id: Type.String(), + from: Type.String(), + to: Type.String(), + seq: Type.Integer(), + ack: Type.Integer(), + serviceName: Type.Optional(Type.String()), + procedureName: Type.Optional(Type.String()), + streamId: Type.String(), + controlFlags: Type.Integer(), + tracing: Type.Optional( + Type.Object({ + traceparent: Type.String(), + tracestate: Type.String(), + }), + ), + payload: t, + }); var ControlMessageAckSchema = Type.Object({ - type: 
Type.Literal("ACK") + type: Type.Literal('ACK'), }); var ControlMessageCloseSchema = Type.Object({ - type: Type.Literal("CLOSE") + type: Type.Literal('CLOSE'), }); -var currentProtocolVersion = "v2.0"; -var acceptedProtocolVersions = ["v1.1", currentProtocolVersion]; +var currentProtocolVersion = 'v2.0'; +var acceptedProtocolVersions = ['v1.1', currentProtocolVersion]; function isAcceptedProtocolVersion(version2) { return acceptedProtocolVersions.includes(version2); } var ControlMessageHandshakeRequestSchema = Type.Object({ - type: Type.Literal("HANDSHAKE_REQ"), + type: Type.Literal('HANDSHAKE_REQ'), protocolVersion: Type.String(), sessionId: Type.String(), /** @@ -1177,60 +1289,54 @@ var ControlMessageHandshakeRequestSchema = Type.Object({ expectedSessionState: Type.Object({ // what the client expects the server to send next nextExpectedSeq: Type.Integer(), - nextSentSeq: Type.Integer() + nextSentSeq: Type.Integer(), }), - metadata: Type.Optional(Type.Unknown()) + metadata: Type.Optional(Type.Unknown()), }); var HandshakeErrorRetriableResponseCodes = Type.Union([ - Type.Literal("SESSION_STATE_MISMATCH") + Type.Literal('SESSION_STATE_MISMATCH'), ]); var HandshakeErrorCustomHandlerFatalResponseCodes = Type.Union([ // The custom validation handler rejected the handler because the client is unsupported. - Type.Literal("REJECTED_UNSUPPORTED_CLIENT"), + Type.Literal('REJECTED_UNSUPPORTED_CLIENT'), // The custom validation handler rejected the handshake. - Type.Literal("REJECTED_BY_CUSTOM_HANDLER") + Type.Literal('REJECTED_BY_CUSTOM_HANDLER'), ]); var HandshakeErrorFatalResponseCodes = Type.Union([ HandshakeErrorCustomHandlerFatalResponseCodes, // The ciient sent a handshake that doesn't comply with the extended handshake metadata. - Type.Literal("MALFORMED_HANDSHAKE_META"), + Type.Literal('MALFORMED_HANDSHAKE_META'), // The ciient sent a handshake that doesn't comply with ControlMessageHandshakeRequestSchema. 
- Type.Literal("MALFORMED_HANDSHAKE"), + Type.Literal('MALFORMED_HANDSHAKE'), // The client's protocol version does not match the server's. - Type.Literal("PROTOCOL_VERSION_MISMATCH") + Type.Literal('PROTOCOL_VERSION_MISMATCH'), ]); var HandshakeErrorResponseCodes = Type.Union([ HandshakeErrorRetriableResponseCodes, - HandshakeErrorFatalResponseCodes + HandshakeErrorFatalResponseCodes, ]); var ControlMessageHandshakeResponseSchema = Type.Object({ - type: Type.Literal("HANDSHAKE_RESP"), + type: Type.Literal('HANDSHAKE_RESP'), status: Type.Union([ Type.Object({ ok: Type.Literal(true), - sessionId: Type.String() + sessionId: Type.String(), }), Type.Object({ ok: Type.Literal(false), reason: Type.String(), - code: HandshakeErrorResponseCodes - }) - ]) + code: HandshakeErrorResponseCodes, + }), + ]), }); var ControlMessagePayloadSchema = Type.Union([ ControlMessageCloseSchema, ControlMessageAckSchema, ControlMessageHandshakeRequestSchema, - ControlMessageHandshakeResponseSchema + ControlMessageHandshakeResponseSchema, ]); -var OpaqueTransportMessageSchema = TransportMessageSchema( - Type.Unknown() -); -function handshakeResponseMessage({ - from, - to, - status -}) { +var OpaqueTransportMessageSchema = TransportMessageSchema(Type.Unknown()); +function handshakeResponseMessage({ from, to, status }) { return { id: generateId(), from, @@ -1240,9 +1346,9 @@ function handshakeResponseMessage({ streamId: generateId(), controlFlags: 0, payload: { - type: "HANDSHAKE_RESP", - status - } + type: 'HANDSHAKE_RESP', + status, + }, }; } function closeStreamMessage(streamId) { @@ -1250,36 +1356,36 @@ function closeStreamMessage(streamId) { streamId, controlFlags: 8 /* StreamClosedBit */, payload: { - type: "CLOSE" - } + type: 'CLOSE', + }, }; } function cancelMessage(streamId, payload) { return { streamId, controlFlags: 4 /* StreamCancelBit */, - payload + payload, }; } function isAck(controlFlag) { - return (controlFlag & 1 /* AckBit */) === 1 /* AckBit */; + return (controlFlag & 1) 
/* AckBit */ === 1 /* AckBit */; } function isStreamOpen(controlFlag) { return ( /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ - (controlFlag & 2 /* StreamOpenBit */) === 2 /* StreamOpenBit */ + (controlFlag & 2) /* StreamOpenBit */ === 2 /* StreamOpenBit */ ); } function isStreamClose(controlFlag) { return ( /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ - (controlFlag & 8 /* StreamClosedBit */) === 8 /* StreamClosedBit */ + (controlFlag & 8) /* StreamClosedBit */ === 8 /* StreamClosedBit */ ); } function isStreamCancel(controlFlag) { return ( /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ - (controlFlag & 4 /* StreamCancelBit */) === 4 /* StreamCancelBit */ + (controlFlag & 4) /* StreamCancelBit */ === 4 /* StreamCancelBit */ ); } @@ -1287,7 +1393,7 @@ function isStreamCancel(controlFlag) { var encoder = new TextEncoder(); var decoder = new TextDecoder(); function uint8ArrayToBase64(uint8Array) { - let binary = ""; + let binary = ''; uint8Array.forEach((byte) => { binary += String.fromCharCode(byte); }); @@ -1308,12 +1414,12 @@ var NaiveJsonCodec = { const val = this[key]; if (val instanceof Uint8Array) { return { $t: uint8ArrayToBase64(val) }; - } else if (typeof val === "bigint") { + } else if (typeof val === 'bigint') { return { $b: val.toString() }; } else { return val; } - }) + }), ); }, fromBuffer: (buff) => { @@ -1327,13 +1433,13 @@ var NaiveJsonCodec = { } else { return val; } - } + }, ); - if (typeof parsed !== "object" || parsed === null) { - throw new Error("unpacked msg is not an object"); + if (typeof parsed !== 'object' || parsed === null) { + throw new Error('unpacked msg is not an object'); } return parsed; - } + }, }; // transport/options.ts @@ -1344,7 +1450,7 @@ var defaultTransportOptions = { connectionTimeoutMs: 2e3, handshakeTimeoutMs: 1e3, enableTransparentSessionReconnects: true, - codec: NaiveJsonCodec + codec: NaiveJsonCodec, }; var 
defaultConnectionRetryOptions = { baseIntervalMs: 150, @@ -1352,14 +1458,14 @@ var defaultConnectionRetryOptions = { maxBackoffMs: 32e3, attemptBudgetCapacity: 5, budgetRestoreIntervalMs: 200, - isFatalConnectionError: () => false + isFatalConnectionError: () => false, }; var defaultClientTransportOptions = { ...defaultTransportOptions, - ...defaultConnectionRetryOptions + ...defaultConnectionRetryOptions, }; var defaultServerTransportOptions = { - ...defaultTransportOptions + ...defaultTransportOptions, }; // logging/log.ts @@ -1367,7 +1473,7 @@ var LoggingLevels = { debug: -1, info: 0, warn: 1, - error: 2 + error: 2, }; var cleanedLogFn = (log) => { return (msg, metadata) => { @@ -1376,7 +1482,7 @@ var cleanedLogFn = (log) => { if (span) { metadata.telemetry = { traceId: span.spanContext().traceId, - spanId: span.spanContext().spanId + spanId: span.spanContext().spanId, }; } } @@ -1392,28 +1498,28 @@ var cleanedLogFn = (log) => { var BaseLogger = class { minLevel; output; - constructor(output, minLevel = "info") { + constructor(output, minLevel = 'info') { this.minLevel = minLevel; this.output = output; } debug(msg, metadata) { if (LoggingLevels[this.minLevel] <= LoggingLevels.debug) { - this.output(msg, metadata ?? {}, "debug"); + this.output(msg, metadata ?? {}, 'debug'); } } info(msg, metadata) { if (LoggingLevels[this.minLevel] <= LoggingLevels.info) { - this.output(msg, metadata ?? {}, "info"); + this.output(msg, metadata ?? {}, 'info'); } } warn(msg, metadata) { if (LoggingLevels[this.minLevel] <= LoggingLevels.warn) { - this.output(msg, metadata ?? {}, "warn"); + this.output(msg, metadata ?? {}, 'warn'); } } error(msg, metadata) { if (LoggingLevels[this.minLevel] <= LoggingLevels.error) { - this.output(msg, metadata ?? {}, "error"); + this.output(msg, metadata ?? 
{}, 'error'); } } }; @@ -1421,16 +1527,16 @@ var createLogProxy = (log) => ({ debug: cleanedLogFn(log.debug.bind(log)), info: cleanedLogFn(log.info.bind(log)), warn: cleanedLogFn(log.warn.bind(log)), - error: cleanedLogFn(log.error.bind(log)) + error: cleanedLogFn(log.error.bind(log)), }); // transport/events.ts var ProtocolError = { - RetriesExceeded: "conn_retry_exceeded", - HandshakeFailed: "handshake_failed", - MessageOrderingViolated: "message_ordering_violated", - InvalidMessage: "invalid_message", - MessageSendFailure: "message_send_failure" + RetriesExceeded: 'conn_retry_exceeded', + HandshakeFailed: 'handshake_failed', + MessageOrderingViolated: 'message_ordering_violated', + InvalidMessage: 'invalid_message', + MessageSendFailure: 'message_send_failure', }; var EventDispatcher = class { eventListeners = {}; @@ -1486,16 +1592,16 @@ var StateMachineState = class { this._isConsumed = false; return new Proxy(this, { get(target, prop) { - if (prop === "_isConsumed" || prop === "id" || prop === "state") { + if (prop === '_isConsumed' || prop === 'id' || prop === 'state') { return Reflect.get(target, prop); } - if (prop === "_handleStateExit") { + if (prop === '_handleStateExit') { return () => { target._isConsumed = true; target._handleStateExit(); }; } - if (prop === "_handleClose") { + if (prop === '_handleClose') { return () => { target._isConsumed = true; target._handleStateExit(); @@ -1504,7 +1610,7 @@ var StateMachineState = class { } if (target._isConsumed) { throw new Error( - `${ERR_CONSUMED}: getting ${prop.toString()} on consumed state` + `${ERR_CONSUMED}: getting ${prop.toString()} on consumed state`, ); } return Reflect.get(target, prop); @@ -1512,11 +1618,11 @@ var StateMachineState = class { set(target, prop, value) { if (target._isConsumed) { throw new Error( - `${ERR_CONSUMED}: setting ${prop.toString()} on consumed state` + `${ERR_CONSUMED}: setting ${prop.toString()} on consumed state`, ); } return Reflect.set(target, prop, value); - } + }, 
}); } }; @@ -1563,7 +1669,7 @@ var IdentifiedSession = class extends CommonSession { telemetry, log, protocolVersion, - seqSent: messagesSent + seqSent: messagesSent, } = props; super(props); this.id = id; @@ -1580,13 +1686,13 @@ var IdentifiedSession = class extends CommonSession { const metadata = { clientId: this.from, connectedTo: this.to, - sessionId: this.id + sessionId: this.id, }; if (this.telemetry.span.isRecording()) { const spanContext = this.telemetry.span.spanContext(); metadata.telemetry = { traceId: spanContext.traceId, - spanId: spanContext.spanId + spanId: spanContext.spanId, }; } return metadata; @@ -1598,7 +1704,7 @@ var IdentifiedSession = class extends CommonSession { to: this.to, from: this.from, seq: this.seq, - ack: this.ack + ack: this.ack, }; this.seq++; return msg; @@ -1611,11 +1717,10 @@ var IdentifiedSession = class extends CommonSession { this.sendBuffer.push(constructedMsg); return { ok: true, - value: constructedMsg.id + value: constructedMsg.id, }; } - _handleStateExit() { - } + _handleStateExit() {} _handleClose() { this.sendBuffer.length = 0; this.telemetry.span.end(); @@ -1653,18 +1758,18 @@ function sendMessage(conn, codec, msg) { if (!sent) { return { ok: false, - reason: "failed to send message" + reason: 'failed to send message', }; } return { ok: true, - value: msg.id + value: msg.id, }; } // transport/sessionStateMachine/SessionConnecting.ts var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { - state = "Connecting" /* Connecting */; + state = 'Connecting' /* Connecting */; connPromise; listeners; connectionTimeout; @@ -1680,7 +1785,7 @@ var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { (err) => { if (this._isConsumed) return; this.listeners.onConnectionFailed(err); - } + }, ); this.connectionTimeout = setTimeout(() => { this.listeners.onConnectionTimeout(); @@ -1691,17 +1796,18 @@ var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { bestEffortClose() { const 
logger = this.log; const metadata = this.loggingMetadata; - this.connPromise.then((conn) => { - conn.close(); - logger?.info( - "connection eventually resolved but session has transitioned, closed connection", - { - ...metadata, - ...conn.loggingMetadata - } - ); - }).catch(() => { - }); + this.connPromise + .then((conn) => { + conn.close(); + logger?.info( + 'connection eventually resolved but session has transitioned, closed connection', + { + ...metadata, + ...conn.loggingMetadata, + }, + ); + }) + .catch(() => {}); } _handleStateExit() { super._handleStateExit(); @@ -1718,7 +1824,7 @@ var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { // transport/sessionStateMachine/SessionNoConnection.ts var SessionNoConnection = class extends IdentifiedSessionWithGracePeriod { - state = "NoConnection" /* NoConnection */; + state = 'NoConnection' /* NoConnection */; _handleClose() { super._handleClose(); } @@ -1728,24 +1834,22 @@ var SessionNoConnection = class extends IdentifiedSessionWithGracePeriod { }; // router/services.ts -import { Type as Type3, Kind as Kind2 } from "@sinclair/typebox"; +import { Type as Type3, Kind as Kind2 } from '@sinclair/typebox'; // router/errors.ts -import { - Kind, - Type as Type2 -} from "@sinclair/typebox"; -var UNCAUGHT_ERROR_CODE = "UNCAUGHT_ERROR"; -var UNEXPECTED_DISCONNECT_CODE = "UNEXPECTED_DISCONNECT"; -var INVALID_REQUEST_CODE = "INVALID_REQUEST"; -var CANCEL_CODE = "CANCEL"; -var ErrResultSchema = (t) => Type2.Object({ - ok: Type2.Literal(false), - payload: t -}); +import { Kind, Type as Type2 } from '@sinclair/typebox'; +var UNCAUGHT_ERROR_CODE = 'UNCAUGHT_ERROR'; +var UNEXPECTED_DISCONNECT_CODE = 'UNEXPECTED_DISCONNECT'; +var INVALID_REQUEST_CODE = 'INVALID_REQUEST'; +var CANCEL_CODE = 'CANCEL'; +var ErrResultSchema = (t) => + Type2.Object({ + ok: Type2.Literal(false), + payload: t, + }); var ValidationErrorDetails = Type2.Object({ path: Type2.String(), - message: Type2.String() + message: Type2.String(), }); 
var ValidationErrors = Type2.Array(ValidationErrorDetails); function castTypeboxValueErrors(errors) { @@ -1753,24 +1857,24 @@ function castTypeboxValueErrors(errors) { for (const error of errors) { result.push({ path: error.path, - message: error.message + message: error.message, }); } return result; } var CancelErrorSchema = Type2.Object({ code: Type2.Literal(CANCEL_CODE), - message: Type2.String() + message: Type2.String(), }); var CancelResultSchema = ErrResultSchema(CancelErrorSchema); var ReaderErrorSchema = Type2.Union([ Type2.Object({ code: Type2.Literal(UNCAUGHT_ERROR_CODE), - message: Type2.String() + message: Type2.String(), }), Type2.Object({ code: Type2.Literal(UNEXPECTED_DISCONNECT_CODE), - message: Type2.String() + message: Type2.String(), }), Type2.Object({ code: Type2.Literal(INVALID_REQUEST_CODE), @@ -1778,15 +1882,15 @@ var ReaderErrorSchema = Type2.Union([ extras: Type2.Optional( Type2.Object({ firstValidationErrors: Type2.Array(ValidationErrorDetails), - totalErrors: Type2.Number() - }) - ) + totalErrors: Type2.Number(), + }), + ), }), - CancelErrorSchema + CancelErrorSchema, ]); var ReaderErrorResultSchema = ErrResultSchema(ReaderErrorSchema); function isUnion(schema) { - return schema[Kind] === "Union"; + return schema[Kind] === 'Union'; } function flattenErrorType(errType) { if (!isUnion(errType)) { @@ -1889,9 +1993,12 @@ function createServiceSchema() { static define(configOrProcedures, maybeProcedures) { let config; let procedures; - if ("initializeState" in configOrProcedures && typeof configOrProcedures.initializeState === "function") { + if ( + 'initializeState' in configOrProcedures && + typeof configOrProcedures.initializeState === 'function' + ) { if (!maybeProcedures) { - throw new Error("Expected procedures to be defined"); + throw new Error('Expected procedures to be defined'); } config = configOrProcedures; procedures = maybeProcedures; @@ -1914,15 +2021,19 @@ function createServiceSchema() { output: Strict(procDef.responseData), 
errors: getSerializedProcErrors(procDef), // Only add `description` field if the type declares it. - ..."description" in procDef ? { description: procDef.description } : {}, + ...('description' in procDef + ? { description: procDef.description } + : {}), type: procDef.type, // Only add the `input` field if the type declares it. - ..."requestData" in procDef ? { - input: Strict(procDef.requestData) - } : {} - } - ]) - ) + ...('requestData' in procDef + ? { + input: Strict(procDef.requestData), + } + : {}), + }, + ]), + ), }; } // TODO remove once clients migrate to v2 @@ -1934,38 +2045,40 @@ function createServiceSchema() { serializeV1Compat() { return { procedures: Object.fromEntries( - Object.entries(this.procedures).map( - ([procName, procDef]) => { - if (procDef.type === "rpc" || procDef.type === "subscription") { - return [ - procName, - { - // BACKWARDS COMPAT: map init to input for protocolv1 - // this is the only change needed to make it compatible. - input: Strict(procDef.requestInit), - output: Strict(procDef.responseData), - errors: getSerializedProcErrors(procDef), - // Only add `description` field if the type declares it. - ..."description" in procDef ? { description: procDef.description } : {}, - type: procDef.type - } - ]; - } + Object.entries(this.procedures).map(([procName, procDef]) => { + if (procDef.type === 'rpc' || procDef.type === 'subscription') { return [ procName, { - init: Strict(procDef.requestInit), + // BACKWARDS COMPAT: map init to input for protocolv1 + // this is the only change needed to make it compatible. + input: Strict(procDef.requestInit), output: Strict(procDef.responseData), errors: getSerializedProcErrors(procDef), // Only add `description` field if the type declares it. - ..."description" in procDef ? { description: procDef.description } : {}, + ...('description' in procDef + ? 
{ description: procDef.description } + : {}), type: procDef.type, - input: Strict(procDef.requestData) - } + }, ]; } - ) - ) + return [ + procName, + { + init: Strict(procDef.requestInit), + output: Strict(procDef.responseData), + errors: getSerializedProcErrors(procDef), + // Only add `description` field if the type declares it. + ...('description' in procDef + ? { description: procDef.description } + : {}), + type: procDef.type, + input: Strict(procDef.requestData), + }, + ]; + }), + ), }; } /** @@ -1983,17 +2096,20 @@ function createServiceSchema() { return Object.freeze({ state, procedures: this.procedures, - [Symbol.asyncDispose]: dispose + [Symbol.asyncDispose]: dispose, }); } }; } function getSerializedProcErrors(procDef) { - if (!("responseError" in procDef) || procDef.responseError[Kind2] === "Never") { + if ( + !('responseError' in procDef) || + procDef.responseError[Kind2] === 'Never' + ) { return Strict(ReaderErrorSchema); } const withProtocolErrors = flattenErrorType( - Type3.Union([procDef.responseError, ReaderErrorSchema]) + Type3.Union([procDef.responseError, ReaderErrorSchema]), ); return Strict(withProtocolErrors); } @@ -2050,46 +2166,43 @@ var ServiceScaffold = class { * ``` */ finalize(procedures) { - return createServiceSchema().define( - this.config, - procedures - ); + return createServiceSchema().define(this.config, procedures); } }; // router/result.ts -import { Type as Type4 } from "@sinclair/typebox"; +import { Type as Type4 } from '@sinclair/typebox'; var AnyResultSchema = Type4.Union([ Type4.Object({ ok: Type4.Literal(false), payload: Type4.Object({ code: Type4.String(), message: Type4.String(), - extras: Type4.Optional(Type4.Unknown()) - }) + extras: Type4.Optional(Type4.Unknown()), + }), }), Type4.Object({ ok: Type4.Literal(true), - payload: Type4.Unknown() - }) + payload: Type4.Unknown(), + }), ]); function Ok(payload) { return { ok: true, - payload + payload, }; } function Err(error) { return { ok: false, - payload: error + payload: 
error, }; } // router/streams.ts var ReadableBrokenError = { - code: "READABLE_BROKEN", - message: "Readable was broken before it is fully consumed" + code: 'READABLE_BROKEN', + message: 'Readable was broken before it is fully consumed', }; function createPromiseWithResolvers() { let resolve; @@ -2103,7 +2216,7 @@ function createPromiseWithResolvers() { // @ts-expect-error promise callbacks are sync resolve, // @ts-expect-error promise callbacks are sync - reject + reject, }; } var ReadableImpl = class { @@ -2147,7 +2260,7 @@ var ReadableImpl = class { */ [Symbol.asyncIterator]() { if (this.locked) { - throw new TypeError("Readable is already locked"); + throw new TypeError('Readable is already locked'); } this.locked = true; let didSignalBreak = false; @@ -2156,21 +2269,21 @@ var ReadableImpl = class { if (didSignalBreak) { return { done: true, - value: void 0 + value: void 0, }; } while (this.queue.length === 0) { if (this.closed && !this.brokenWithValuesLeftToRead) { return { done: true, - value: void 0 + value: void 0, }; } if (this.broken) { didSignalBreak = true; return { done: false, - value: Err(ReadableBrokenError) + value: Err(ReadableBrokenError), }; } if (!this.next) { @@ -2185,7 +2298,7 @@ var ReadableImpl = class { return: async () => { this.break(); return { done: true, value: void 0 }; - } + }, }; } /** @@ -2231,7 +2344,7 @@ var ReadableImpl = class { return; } if (this.closed) { - throw new Error("Cannot push to closed Readable"); + throw new Error('Cannot push to closed Readable'); } this.queue.push(value); this.next?.resolve(); @@ -2242,7 +2355,7 @@ var ReadableImpl = class { */ _triggerClose() { if (this.closed) { - throw new Error("Unexpected closing multiple times"); + throw new Error('Unexpected closing multiple times'); } this.closed = true; this.next?.resolve(); @@ -2279,7 +2392,7 @@ var WritableImpl = class { } write(value) { if (this.closed) { - throw new Error("Cannot write to closed Writable"); + throw new Error('Cannot write to closed 
Writable'); } this.writeCb(value); } @@ -2307,21 +2420,21 @@ var WritableImpl = class { }; // router/procedures.ts -import { Type as Type5 } from "@sinclair/typebox"; +import { Type as Type5 } from '@sinclair/typebox'; function rpc({ requestInit, responseData, responseError = Type5.Never(), description, - handler + handler, }) { return { - ...description ? { description } : {}, - type: "rpc", + ...(description ? { description } : {}), + type: 'rpc', requestInit, responseData, responseError, - handler + handler, }; } function upload({ @@ -2330,16 +2443,16 @@ function upload({ responseData, responseError = Type5.Never(), description, - handler + handler, }) { return { - type: "upload", - ...description ? { description } : {}, + type: 'upload', + ...(description ? { description } : {}), requestInit, requestData, responseData, responseError, - handler + handler, }; } function subscription({ @@ -2347,15 +2460,15 @@ function subscription({ responseData, responseError = Type5.Never(), description, - handler + handler, }) { return { - type: "subscription", - ...description ? { description } : {}, + type: 'subscription', + ...(description ? { description } : {}), requestInit, responseData, responseError, - handler + handler, }; } function stream({ @@ -2364,32 +2477,32 @@ function stream({ responseData, responseError = Type5.Never(), description, - handler + handler, }) { return { - type: "stream", - ...description ? { description } : {}, + type: 'stream', + ...(description ? 
{ description } : {}), requestInit, requestData, responseData, responseError, - handler + handler, }; } var Procedure = { rpc, upload, subscription, - stream + stream, }; // router/server.ts -import { Value } from "@sinclair/typebox/value"; +import { Value } from '@sinclair/typebox/value'; // transport/stringifyError.ts function coerceErrorString(err) { if (err instanceof Error) { - return err.message || "unknown reason"; + return err.message || 'unknown reason'; } return `[coerced to error] ${String(err)}`; } @@ -2412,7 +2525,14 @@ var RiverServer = class { streams; services; unregisterTransportListeners; - constructor(transport, services2, handshakeOptions, extendedContext, maxCancelledStreamTombstonesPerSession = 200, middlewares = []) { + constructor( + transport, + services2, + handshakeOptions, + extendedContext, + maxCancelledStreamTombstonesPerSession = 200, + middlewares = [], + ) { const instances = {}; this.middlewares = middlewares; this.services = instances; @@ -2423,7 +2543,7 @@ var RiverServer = class { instances[name] = instance; this.contextMap.set(instance, { ...extendedContext, - state: instance.state + state: instance.state, }); } if (handshakeOptions) { @@ -2432,7 +2552,8 @@ var RiverServer = class { this.transport = transport; this.streams = /* @__PURE__ */ new Map(); this.serverCancelledStreams = /* @__PURE__ */ new Map(); - this.maxCancelledStreamTombstonesPerSession = maxCancelledStreamTombstonesPerSession; + this.maxCancelledStreamTombstonesPerSession = + maxCancelledStreamTombstonesPerSession; this.log = transport.log; const handleCreatingNewStreams = (message) => { if (message.to !== this.transport.clientId) { @@ -2440,8 +2561,8 @@ var RiverServer = class { `got msg with destination that isn't this server, ignoring`, { clientId: this.transport.clientId, - transportMessage: message - } + transportMessage: message, + }, ); return; } @@ -2468,15 +2589,15 @@ var RiverServer = class { newStreamProps.tracingCtx, (span) => { 
this.createNewProcStream(span, newStreamProps); - } + }, ); }; const handleSessionStatus = (evt) => { - if (evt.status !== "closing") return; + if (evt.status !== 'closing') return; const disconnectedClientId = evt.session.to; this.log?.info( `got session disconnect from ${disconnectedClientId}, cleaning up streams`, - evt.session.loggingMetadata + evt.session.loggingMetadata, ); for (const stream2 of this.streams.values()) { if (stream2.from === disconnectedClientId) { @@ -2486,20 +2607,20 @@ var RiverServer = class { this.serverCancelledStreams.delete(disconnectedClientId); }; const handleTransportStatus = (evt) => { - if (evt.status !== "closed") return; + if (evt.status !== 'closed') return; this.unregisterTransportListeners(); }; this.unregisterTransportListeners = () => { - this.transport.removeEventListener("message", handleCreatingNewStreams); - this.transport.removeEventListener("sessionStatus", handleSessionStatus); + this.transport.removeEventListener('message', handleCreatingNewStreams); + this.transport.removeEventListener('sessionStatus', handleSessionStatus); this.transport.removeEventListener( - "transportStatus", - handleTransportStatus + 'transportStatus', + handleTransportStatus, ); }; - this.transport.addEventListener("message", handleCreatingNewStreams); - this.transport.addEventListener("sessionStatus", handleSessionStatus); - this.transport.addEventListener("transportStatus", handleTransportStatus); + this.transport.addEventListener('message', handleCreatingNewStreams); + this.transport.addEventListener('sessionStatus', handleSessionStatus); + this.transport.addEventListener('transportStatus', handleTransportStatus); } createNewProcStream(span, props) { const { @@ -2512,25 +2633,25 @@ var RiverServer = class { serviceContext, initPayload, procClosesWithInit, - passInitAsDataForBackwardsCompat + passInitAsDataForBackwardsCompat, } = props; const { to: from, loggingMetadata, protocolVersion, - id: sessionId + id: sessionId, } = initialSession; 
loggingMetadata.telemetry = { traceId: span.spanContext().traceId, - spanId: span.spanContext().spanId + spanId: span.spanContext().spanId, }; let cleanClose = true; const onMessage = (msg) => { if (msg.from !== from) { - this.log?.error("got stream message from unexpected client", { + this.log?.error('got stream message from unexpected client', { ...loggingMetadata, transportMessage: msg, - tags: ["invariant-violation"] + tags: ['invariant-violation'], }); return; } @@ -2541,15 +2662,15 @@ var RiverServer = class { } else { cancelResult = Err({ code: CANCEL_CODE, - message: "stream cancelled, client sent invalid payload" + message: 'stream cancelled, client sent invalid payload', }); - this.log?.warn("got stream cancel without a valid protocol error", { + this.log?.warn('got stream cancel without a valid protocol error', { ...loggingMetadata, transportMessage: msg, validationErrors: [ - ...Value.Errors(CancelResultSchema, msg.payload) + ...Value.Errors(CancelResultSchema, msg.payload), ], - tags: ["invalid-request"] + tags: ['invalid-request'], }); } if (!reqReadable.isClosed()) { @@ -2560,57 +2681,63 @@ var RiverServer = class { return; } if (reqReadable.isClosed()) { - this.log?.warn("received message after request stream is closed", { + this.log?.warn('received message after request stream is closed', { ...loggingMetadata, transportMessage: msg, - tags: ["invalid-request"] + tags: ['invalid-request'], }); onServerCancel({ code: INVALID_REQUEST_CODE, - message: "received message after request stream is closed" + message: 'received message after request stream is closed', }); return; } - if ("requestData" in procedure && Value.Check(procedure.requestData, msg.payload)) { + if ( + 'requestData' in procedure && + Value.Check(procedure.requestData, msg.payload) + ) { reqReadable._pushValue(Ok(msg.payload)); if (isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion)) { closeReadable(); } return; } - if (Value.Check(ControlMessagePayloadSchema, msg.payload) 
&& isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion)) { + if ( + Value.Check(ControlMessagePayloadSchema, msg.payload) && + isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion) + ) { closeReadable(); return; } let validationErrors; let errMessage; - if ("requestData" in procedure) { - errMessage = "message in requestData position did not match schema"; + if ('requestData' in procedure) { + errMessage = 'message in requestData position did not match schema'; validationErrors = castTypeboxValueErrors( - Value.Errors(procedure.requestData, msg.payload) + Value.Errors(procedure.requestData, msg.payload), ); } else { validationErrors = castTypeboxValueErrors( - Value.Errors(ControlMessagePayloadSchema, msg.payload) + Value.Errors(ControlMessagePayloadSchema, msg.payload), ); - errMessage = "message in control payload position did not match schema"; + errMessage = 'message in control payload position did not match schema'; } this.log?.warn(errMessage, { ...loggingMetadata, transportMessage: msg, validationErrors: validationErrors.map((error) => ({ path: error.path, - message: error.message + message: error.message, })), - tags: ["invalid-request"] + tags: ['invalid-request'], }); onServerCancel({ code: INVALID_REQUEST_CODE, message: errMessage, extras: { totalErrors: validationErrors.length, - firstValidationErrors: validationErrors.slice(0, 5) - } + firstValidationErrors: validationErrors.slice(0, 5), + }, }); }; const finishedController = new AbortController(); @@ -2626,18 +2753,18 @@ var RiverServer = class { cleanClose = false; const errPayload = { code: UNEXPECTED_DISCONNECT_CODE, - message: "client unexpectedly disconnected" + message: 'client unexpectedly disconnected', }; if (!reqReadable.isClosed()) { reqReadable._pushValue(Err(errPayload)); closeReadable(); } resWritable.close(); - } + }, }; const sessionScopedSend = this.transport.getSessionBoundSendFn( from, - sessionId + sessionId, ); const cancelStream = (streamId2, payload) => { 
this.cancelStream(from, sessionScopedSend, streamId2, payload); @@ -2660,11 +2787,12 @@ var RiverServer = class { finishedController.abort(); this.streams.delete(streamId); }; - const procClosesWithResponse = procedure.type === "rpc" || procedure.type === "upload"; + const procClosesWithResponse = + procedure.type === 'rpc' || procedure.type === 'upload'; const reqReadable = new ReadableImpl(); const closeReadable = () => { reqReadable._triggerClose(); - if (protocolVersion === "v1.1") { + if (protocolVersion === 'v1.1') { if (!procClosesWithResponse && !resWritable.isClosed()) { resWritable.close(); } @@ -2683,8 +2811,10 @@ var RiverServer = class { } sessionScopedSend({ streamId, - controlFlags: procClosesWithResponse ? getStreamCloseBackwardsCompat(protocolVersion) : 0, - payload: response + controlFlags: procClosesWithResponse + ? getStreamCloseBackwardsCompat(protocolVersion) + : 0, + payload: response, }); if (procClosesWithResponse) { resWritable.close(); @@ -2697,7 +2827,7 @@ var RiverServer = class { message.controlFlags = getStreamCloseBackwardsCompat(protocolVersion); sessionScopedSend(message); } - if (protocolVersion === "v1.1") { + if (protocolVersion === 'v1.1') { if (!reqReadable.isClosed()) { closeReadable(); } @@ -2705,7 +2835,7 @@ var RiverServer = class { if (reqReadable.isClosed()) { cleanup(); } - } + }, }); const onHandlerError = (err, span2) => { const errorMsg = coerceErrorString(err); @@ -2716,18 +2846,18 @@ var RiverServer = class { ...loggingMetadata, transportMessage: { procedureName, - serviceName + serviceName, }, extras: { error: errorMsg, - originalException: err + originalException: err, }, - tags: ["uncaught-handler-error"] - } + tags: ['uncaught-handler-error'], + }, ); onServerCancel({ code: UNCAUGHT_ERROR_CODE, - message: errorMsg + message: errorMsg, }); }; if (procClosesWithInit) { @@ -2742,12 +2872,12 @@ var RiverServer = class { cancel: (message) => { const errRes = { code: CANCEL_CODE, - message: message ?? 
"cancelled by server procedure handler" + message: message ?? 'cancelled by server procedure handler', }; onServerCancel(errRes); return Err(errRes); }, - signal: finishedController.signal + signal: finishedController.signal, }; const middlewareContext = { ...serviceContext, @@ -2758,15 +2888,15 @@ var RiverServer = class { signal: finishedController.signal, streamId, procedureName, - serviceName + serviceName, }; const runProcedureHandler = async () => { switch (procedure.type) { - case "rpc": + case 'rpc': try { const responsePayload = await procedure.handler({ ctx: handlerContextWithSpan, - reqInit: initPayload + reqInit: initPayload, }); if (resWritable.isClosed()) { return; @@ -2778,13 +2908,13 @@ var RiverServer = class { span.end(); } break; - case "stream": + case 'stream': try { await procedure.handler({ ctx: handlerContextWithSpan, reqInit: initPayload, reqReadable, - resWritable + resWritable, }); } catch (err) { onHandlerError(err, span); @@ -2792,12 +2922,12 @@ var RiverServer = class { span.end(); } break; - case "subscription": + case 'subscription': try { await procedure.handler({ ctx: handlerContextWithSpan, reqInit: initPayload, - resWritable + resWritable, }); } catch (err) { onHandlerError(err, span); @@ -2805,12 +2935,12 @@ var RiverServer = class { span.end(); } break; - case "upload": + case 'upload': try { const responsePayload = await procedure.handler({ ctx: handlerContextWithSpan, reqInit: initPayload, - reqReadable + reqReadable, }); if (resWritable.isClosed()) { return; @@ -2830,13 +2960,13 @@ var RiverServer = class { middleware({ ctx: middlewareContext, reqInit: initPayload, - next + next, }); }; }, () => { void runProcedureHandler(); - } + }, )(); if (!finishedController.signal.aborted) { this.streams.set(streamId, procStream); @@ -2848,7 +2978,7 @@ var RiverServer = class { const err = `no context found for ${serviceName}`; this.log?.error(err, { clientId: this.transport.clientId, - tags: ["invariant-violation"] + tags: 
['invariant-violation'], }); throw new Error(err); } @@ -2860,32 +2990,32 @@ var RiverServer = class { this.log?.error(`couldn't find session for ${initMessage.from}`, { clientId: this.transport.clientId, transportMessage: initMessage, - tags: ["invariant-violation"] + tags: ['invariant-violation'], }); return null; } const sessionScopedSend = this.transport.getSessionBoundSendFn( initMessage.from, - session.id + session.id, ); const cancelStream = (streamId, payload) => { this.cancelStream(initMessage.from, sessionScopedSend, streamId, payload); }; const sessionMetadata = this.transport.sessionHandshakeMetadata.get( - session.to + session.to, ); if (!sessionMetadata) { const errMessage = `session doesn't have handshake metadata`; this.log?.error(errMessage, { ...session.loggingMetadata, - tags: ["invariant-violation"] + tags: ['invariant-violation'], }); cancelStream( initMessage.streamId, Err({ code: UNCAUGHT_ERROR_CODE, - message: errMessage - }) + message: errMessage, + }), ); return null; } @@ -2895,14 +3025,14 @@ var RiverServer = class { ...session.loggingMetadata, clientId: this.transport.clientId, transportMessage: initMessage, - tags: ["invalid-request"] + tags: ['invalid-request'], }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage - }) + message: errMessage, + }), ); return null; } @@ -2911,14 +3041,14 @@ var RiverServer = class { this.log?.warn(errMessage, { ...session.loggingMetadata, transportMessage: initMessage, - tags: ["invalid-request"] + tags: ['invalid-request'], }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage - }) + message: errMessage, + }), ); return null; } @@ -2927,14 +3057,14 @@ var RiverServer = class { this.log?.warn(errMessage, { ...session.loggingMetadata, transportMessage: initMessage, - tags: ["invalid-request"] + tags: ['invalid-request'], }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage - }) 
+ message: errMessage, + }), ); return null; } @@ -2944,14 +3074,14 @@ var RiverServer = class { ...session.loggingMetadata, clientId: this.transport.clientId, transportMessage: initMessage, - tags: ["invalid-request"] + tags: ['invalid-request'], }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage - }) + message: errMessage, + }), ); return null; } @@ -2961,32 +3091,37 @@ var RiverServer = class { this.log?.warn(errMessage, { ...session.loggingMetadata, transportMessage: initMessage, - tags: ["invalid-request"] + tags: ['invalid-request'], }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage - }) + message: errMessage, + }), ); return null; } const serviceContext = this.getContext(service, initMessage.serviceName); const procedure = service.procedures[initMessage.procedureName]; - if (!["rpc", "upload", "stream", "subscription"].includes(procedure.type)) { + if (!['rpc', 'upload', 'stream', 'subscription'].includes(procedure.type)) { this.log?.error( `got request for invalid procedure type ${procedure.type} at ${initMessage.serviceName}.${initMessage.procedureName}`, { ...session.loggingMetadata, transportMessage: initMessage, - tags: ["invariant-violation"] - } + tags: ['invariant-violation'], + }, ); return null; } let passInitAsDataForBackwardsCompat = false; - if (session.protocolVersion === "v1.1" && (procedure.type === "upload" || procedure.type === "stream") && Value.Check(procedure.requestData, initMessage.payload) && Value.Check(procedure.requestInit, {})) { + if ( + session.protocolVersion === 'v1.1' && + (procedure.type === 'upload' || procedure.type === 'stream') && + Value.Check(procedure.requestData, initMessage.payload) && + Value.Check(procedure.requestInit, {}) + ) { passInitAsDataForBackwardsCompat = true; } else if (!Value.Check(procedure.requestInit, initMessage.payload)) { const errMessage = `procedure init failed validation`; @@ -2994,14 +3129,14 @@ var 
RiverServer = class { ...session.loggingMetadata, clientId: this.transport.clientId, transportMessage: initMessage, - tags: ["invalid-request"] + tags: ['invalid-request'], }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage - }) + message: errMessage, + }), ); return null; } @@ -3017,16 +3152,16 @@ var RiverServer = class { serviceContext, procClosesWithInit: isStreamCloseBackwardsCompat( initMessage.controlFlags, - session.protocolVersion + session.protocolVersion, ), - passInitAsDataForBackwardsCompat + passInitAsDataForBackwardsCompat, }; } cancelStream(to, sessionScopedSend, streamId, payload) { let cancelledStreamsInSession = this.serverCancelledStreams.get(to); if (!cancelledStreamsInSession) { cancelledStreamsInSession = new LRUSet( - this.maxCancelledStreamTombstonesPerSession + this.maxCancelledStreamTombstonesPerSession, ); this.serverCancelledStreams.set(to, cancelledStreamsInSession); } @@ -3065,19 +3200,19 @@ var LRUSet = class { } }; function isStreamCancelBackwardsCompat(controlFlags, protocolVersion) { - if (protocolVersion === "v1.1") { + if (protocolVersion === 'v1.1') { return false; } return isStreamCancel(controlFlags); } function isStreamCloseBackwardsCompat(controlFlags, protocolVersion) { - if (protocolVersion === "v1.1") { + if (protocolVersion === 'v1.1') { return isStreamCancel(controlFlags); } return isStreamClose(controlFlags); } function getStreamCloseBackwardsCompat(protocolVersion) { - if (protocolVersion === "v1.1") { + if (protocolVersion === 'v1.1') { return 4 /* StreamCancelBit */; } return 8 /* StreamClosedBit */; @@ -3089,7 +3224,7 @@ function createServer(transport, services2, providedServerOptions) { providedServerOptions?.handshakeOptions, providedServerOptions?.extendedContext, providedServerOptions?.maxCancelledStreamTombstonesPerSession, - providedServerOptions?.middlewares + providedServerOptions?.middlewares, ); } @@ -3099,22 +3234,30 @@ function 
createServerHandshakeOptions(schema, validate) { } // package.json -var version = "0.212.2"; +var version = '0.212.2'; // tracing/index.ts -function createSessionTelemetryInfo(tracer, sessionId, to, from, propagationCtx) { - const parentCtx = propagationCtx ? propagation.extract(context.active(), propagationCtx) : context.active(); +function createSessionTelemetryInfo( + tracer, + sessionId, + to, + from, + propagationCtx, +) { + const parentCtx = propagationCtx + ? propagation.extract(context.active(), propagationCtx) + : context.active(); const span = tracer.startSpan( `river.session`, { attributes: { - component: "river", - "river.session.id": sessionId, - "river.session.to": to, - "river.session.from": from - } + component: 'river', + 'river.session.id': sessionId, + 'river.session.to': to, + 'river.session.from': from, + }, }, - parentCtx + parentCtx, ); const ctx = trace.setSpan(parentCtx, span); return { span, ctx }; @@ -3124,53 +3267,64 @@ function createConnectionTelemetryInfo(tracer, connection, info) { `river.connection`, { attributes: { - component: "river", - "river.connection.id": connection.id + component: 'river', + 'river.connection.id': connection.id, }, - links: [{ context: info.span.spanContext() }] + links: [{ context: info.span.spanContext() }], }, - info.ctx + info.ctx, ); const ctx = trace.setSpan(info.ctx, span); return { span, ctx }; } -function createHandlerSpan(tracer, session, kind, serviceName, procedureName, streamId, tracing, fn) { - const ctx = tracing ? propagation.extract(context.active(), tracing) : context.active(); +function createHandlerSpan( + tracer, + session, + kind, + serviceName, + procedureName, + streamId, + tracing, + fn, +) { + const ctx = tracing + ? 
propagation.extract(context.active(), tracing) + : context.active(); return tracer.startActiveSpan( `river.server.${serviceName}.${procedureName}`, { attributes: { - component: "river", - "river.method.kind": kind, - "river.method.service": serviceName, - "river.method.name": procedureName, - "river.streamId": streamId, - "span.kind": "server" + component: 'river', + 'river.method.kind': kind, + 'river.method.service': serviceName, + 'river.method.name': procedureName, + 'river.streamId': streamId, + 'span.kind': 'server', }, links: [{ context: session.telemetry.span.spanContext() }], - kind: SpanKind.SERVER + kind: SpanKind.SERVER, }, ctx, - fn + fn, ); } function recordRiverError(span, error) { span.setStatus({ code: SpanStatusCode.ERROR, - message: error.message + message: error.message, }); span.setAttributes({ - "river.error_code": error.code, - "river.error_message": error.message + 'river.error_code': error.code, + 'river.error_message': error.message, }); } function getTracer() { - return trace.getTracer("river", version); + return trace.getTracer('river', version); } // transport/sessionStateMachine/SessionWaitingForHandshake.ts var SessionWaitingForHandshake = class extends CommonSession { - state = "WaitingForHandshake" /* WaitingForHandshake */; + state = 'WaitingForHandshake' /* WaitingForHandshake */; conn; listeners; handshakeTimeout; @@ -3189,7 +3343,7 @@ var SessionWaitingForHandshake = class extends CommonSession { return { clientId: this.from, connId: this.conn.id, - ...this.conn.loggingMetadata + ...this.conn.loggingMetadata, }; } onHandshakeData = (msg) => { @@ -3197,7 +3351,7 @@ var SessionWaitingForHandshake = class extends CommonSession { if (!parsedMsgRes.ok) { this.listeners.onInvalidHandshake( `could not parse handshake message: ${parsedMsgRes.reason}`, - "MALFORMED_HANDSHAKE" + 'MALFORMED_HANDSHAKE', ); return; } @@ -3220,7 +3374,7 @@ var SessionWaitingForHandshake = class extends CommonSession { // 
transport/sessionStateMachine/SessionHandshaking.ts var SessionHandshaking = class extends IdentifiedSessionWithGracePeriod { - state = "Handshaking" /* Handshaking */; + state = 'Handshaking' /* Handshaking */; conn; listeners; handshakeTimeout; @@ -3238,7 +3392,7 @@ var SessionHandshaking = class extends IdentifiedSessionWithGracePeriod { get loggingMetadata() { return { ...super.loggingMetadata, - ...this.conn.loggingMetadata + ...this.conn.loggingMetadata, }; } onHandshakeData = (msg) => { @@ -3246,7 +3400,7 @@ var SessionHandshaking = class extends IdentifiedSessionWithGracePeriod { if (!parsedMsgRes.ok) { this.listeners.onInvalidHandshake( `could not parse handshake message: ${parsedMsgRes.reason}`, - "MALFORMED_HANDSHAKE" + 'MALFORMED_HANDSHAKE', ); return; } @@ -3273,7 +3427,7 @@ var SessionHandshaking = class extends IdentifiedSessionWithGracePeriod { // transport/sessionStateMachine/SessionConnected.ts var SessionConnected = class extends IdentifiedSession { - state = "Connected" /* Connected */; + state = 'Connected' /* Connected */; conn; listeners; heartbeatHandle; @@ -3293,7 +3447,7 @@ var SessionConnected = class extends IdentifiedSession { this.log?.error(msg, { ...this.loggingMetadata, transportMessage: constructedMsg, - tags: ["invariant-violation"] + tags: ['invariant-violation'], }); throw new Error(msg); } @@ -3321,8 +3475,10 @@ var SessionConnected = class extends IdentifiedSession { sendBufferedMessages() { if (this.sendBuffer.length > 0) { this.log?.info( - `sending ${this.sendBuffer.length} buffered messages, starting at seq ${this.nextSeq()}`, - this.loggingMetadata + `sending ${ + this.sendBuffer.length + } buffered messages, starting at seq ${this.nextSeq()}`, + this.loggingMetadata, ); for (const msg of this.sendBuffer) { this.assertSendOrdering(msg); @@ -3339,7 +3495,7 @@ var SessionConnected = class extends IdentifiedSession { get loggingMetadata() { return { ...super.loggingMetadata, - ...this.conn.loggingMetadata + 
...this.conn.loggingMetadata, }; } startMissingHeartbeatTimeout() { @@ -3348,10 +3504,10 @@ var SessionConnected = class extends IdentifiedSession { this.heartbeatMissTimeout = setTimeout(() => { this.log?.info( `closing connection to ${this.to} due to inactivity (missed ${maxMisses} heartbeats which is ${missDuration}ms)`, - this.loggingMetadata + this.loggingMetadata, ); this.telemetry.span.addEvent( - "closing connection due to missing heartbeat" + 'closing connection due to missing heartbeat', ); this.conn.close(); }, missDuration); @@ -3363,13 +3519,13 @@ var SessionConnected = class extends IdentifiedSession { }, this.options.heartbeatIntervalMs); } sendHeartbeat() { - this.log?.debug("sending heartbeat", this.loggingMetadata); + this.log?.debug('sending heartbeat', this.loggingMetadata); const heartbeat = { - streamId: "heartbeat", + streamId: 'heartbeat', controlFlags: 1 /* AckBit */, payload: { - type: "ACK" - } + type: 'ACK', + }, }; this.send(heartbeat); } @@ -3377,7 +3533,7 @@ var SessionConnected = class extends IdentifiedSession { const parsedMsgRes = this.codec.fromBuffer(msg); if (!parsedMsgRes.ok) { this.listeners.onInvalidMessage( - `could not parse message: ${parsedMsgRes.reason}` + `could not parse message: ${parsedMsgRes.reason}`, ); return; } @@ -3388,19 +3544,19 @@ var SessionConnected = class extends IdentifiedSession { `received duplicate msg (got seq: ${parsedMsg.seq}, wanted seq: ${this.ack}), discarding`, { ...this.loggingMetadata, - transportMessage: parsedMsg - } + transportMessage: parsedMsg, + }, ); } else { const reason = `received out-of-order msg, closing connection (got seq: ${parsedMsg.seq}, wanted seq: ${this.ack})`; this.log?.error(reason, { ...this.loggingMetadata, transportMessage: parsedMsg, - tags: ["invariant-violation"] + tags: ['invariant-violation'], }); this.telemetry.span.setStatus({ code: SpanStatusCode.ERROR, - message: reason + message: reason, }); this.conn.close(); } @@ -3408,7 +3564,7 @@ var SessionConnected = 
class extends IdentifiedSession { } this.log?.debug(`received msg`, { ...this.loggingMetadata, - transportMessage: parsedMsg + transportMessage: parsedMsg, }); this.updateBookkeeping(parsedMsg.ack, parsedMsg.seq); if (!isAck(parsedMsg.controlFlags)) { @@ -3417,7 +3573,7 @@ var SessionConnected = class extends IdentifiedSession { } this.log?.debug(`discarding msg (ack bit set)`, { ...this.loggingMetadata, - transportMessage: parsedMsg + transportMessage: parsedMsg, }); if (!this.isActivelyHeartbeating) { this.sendHeartbeat(); @@ -3445,7 +3601,7 @@ var SessionConnected = class extends IdentifiedSession { // transport/sessionStateMachine/SessionBackingOff.ts var SessionBackingOff = class extends IdentifiedSessionWithGracePeriod { - state = "BackingOff" /* BackingOff */; + state = 'BackingOff' /* BackingOff */; listeners; backoffTimeout; constructor(props) { @@ -3468,7 +3624,7 @@ var SessionBackingOff = class extends IdentifiedSessionWithGracePeriod { }; // codec/adapter.ts -import { Value as Value2 } from "@sinclair/typebox/value"; +import { Value as Value2 } from '@sinclair/typebox/value'; var CodecMessageAdapter = class { constructor(codec) { this.codec = codec; @@ -3477,12 +3633,12 @@ var CodecMessageAdapter = class { try { return { ok: true, - value: this.codec.toBuffer(msg) + value: this.codec.toBuffer(msg), }; } catch (e) { return { ok: false, - reason: coerceErrorString(e) + reason: coerceErrorString(e), }; } } @@ -3492,17 +3648,17 @@ var CodecMessageAdapter = class { if (!Value2.Check(OpaqueTransportMessageSchema, parsedMsg)) { return { ok: false, - reason: "transport message schema mismatch" + reason: 'transport message schema mismatch', }; } return { ok: true, - value: parsedMsg + value: parsedMsg, }; } catch (e) { return { ok: false, - reason: coerceErrorString(e) + reason: coerceErrorString(e), }; } } @@ -3523,18 +3679,26 @@ function inheritSharedSession(session) { log: session.log, tracer: session.tracer, protocolVersion: session.protocolVersion, - codec: 
session.codec + codec: session.codec, }; } function inheritSharedSessionWithGrace(session) { return { ...inheritSharedSession(session), - graceExpiryTime: session.graceExpiryTime + graceExpiryTime: session.graceExpiryTime, }; } var SessionStateGraph = { entrypoints: { - NoConnection: (to, from, listeners, options, protocolVersion, tracer, log) => { + NoConnection: ( + to, + from, + listeners, + options, + protocolVersion, + tracer, + log, + ) => { const id = `session-${generateId()}`; const telemetry = createSessionTelemetryInfo(tracer, id, to, from); const sendBuffer = []; @@ -3553,11 +3717,11 @@ var SessionStateGraph = { protocolVersion, tracer, log, - codec: new CodecMessageAdapter(options.codec) + codec: new CodecMessageAdapter(options.codec), }); session.log?.info(`session ${session.id} created in NoConnection state`, { ...session.loggingMetadata, - tags: ["state-transition"] + tags: ['state-transition'], }); return session; }, @@ -3569,14 +3733,14 @@ var SessionStateGraph = { options, tracer, log, - codec: new CodecMessageAdapter(options.codec) + codec: new CodecMessageAdapter(options.codec), }); session.log?.info(`session created in WaitingForHandshake state`, { ...session.loggingMetadata, - tags: ["state-transition"] + tags: ['state-transition'], }); return session; - } + }, }, // All of the transitions 'move'/'consume' the old session and return a new one. // After a session is transitioned, any usage of the old session will throw. 
@@ -3588,14 +3752,14 @@ var SessionStateGraph = { const session = new SessionBackingOff({ backoffMs, listeners, - ...carriedState + ...carriedState, }); session.log?.info( `session ${session.id} transition from NoConnection to BackingOff`, { ...session.loggingMetadata, - tags: ["state-transition"] - } + tags: ['state-transition'], + }, ); return session; }, @@ -3605,14 +3769,14 @@ var SessionStateGraph = { const session = new SessionConnecting({ connPromise, listeners, - ...carriedState + ...carriedState, }); session.log?.info( `session ${session.id} transition from BackingOff to Connecting`, { ...session.loggingMetadata, - tags: ["state-transition"] - } + tags: ['state-transition'], + }, ); return session; }, @@ -3622,19 +3786,19 @@ var SessionStateGraph = { const session = new SessionHandshaking({ conn, listeners, - ...carriedState + ...carriedState, }); conn.telemetry = createConnectionTelemetryInfo( session.tracer, conn, - session.telemetry + session.telemetry, ); session.log?.info( `session ${session.id} transition from Connecting to Handshaking`, { ...session.loggingMetadata, - tags: ["state-transition"] - } + tags: ['state-transition'], + }, ); return session; }, @@ -3645,67 +3809,73 @@ var SessionStateGraph = { const session = new SessionConnected({ conn, listeners, - ...carriedState + ...carriedState, }); session.startMissingHeartbeatTimeout(); session.log?.info( `session ${session.id} transition from Handshaking to Connected`, { ...session.loggingMetadata, - tags: ["state-transition"] - } + tags: ['state-transition'], + }, ); return session; }, - WaitingForHandshakeToConnected: (pendingSession, oldSession, sessionId, to, propagationCtx, listeners, protocolVersion) => { + WaitingForHandshakeToConnected: ( + pendingSession, + oldSession, + sessionId, + to, + propagationCtx, + listeners, + protocolVersion, + ) => { const conn = pendingSession.conn; const { from, options } = pendingSession; - const carriedState = oldSession ? 
( - // old session exists, inherit state - inheritSharedSession(oldSession) - ) : ( - // old session does not exist, create new state - { - id: sessionId, - from, - to, - seq: 0, - ack: 0, - seqSent: 0, - sendBuffer: [], - telemetry: createSessionTelemetryInfo( - pendingSession.tracer, - sessionId, - to, + const carriedState = oldSession + ? // old session exists, inherit state + inheritSharedSession(oldSession) + : // old session does not exist, create new state + { + id: sessionId, from, - propagationCtx - ), - options, - tracer: pendingSession.tracer, - log: pendingSession.log, - protocolVersion, - codec: new CodecMessageAdapter(options.codec) - } - ); + to, + seq: 0, + ack: 0, + seqSent: 0, + sendBuffer: [], + telemetry: createSessionTelemetryInfo( + pendingSession.tracer, + sessionId, + to, + from, + propagationCtx, + ), + options, + tracer: pendingSession.tracer, + log: pendingSession.log, + protocolVersion, + codec: new CodecMessageAdapter(options.codec), + }; pendingSession._handleStateExit(); oldSession?._handleStateExit(); const session = new SessionConnected({ conn, listeners, - ...carriedState + ...carriedState, }); session.startMissingHeartbeatTimeout(); conn.telemetry = createConnectionTelemetryInfo( session.tracer, conn, - session.telemetry + session.telemetry, ); session.log?.info( `session ${session.id} transition from WaitingForHandshake to Connected`, { ...session.loggingMetadata, - tags: ["state-transition"] - } + tags: ['state-transition'], + }, ); return session; }, @@ -3715,14 +3885,14 @@ var SessionStateGraph = { oldSession._handleStateExit(); const session = new SessionNoConnection({ listeners, - ...carriedState + ...carriedState, }); session.log?.info( `session ${session.id} transition from BackingOff to NoConnection`, { ...session.loggingMetadata, - tags: ["state-transition"] - } + tags: ['state-transition'], + }, ); return session; }, @@ -3732,14 +3902,14 @@ var SessionStateGraph = { oldSession._handleStateExit(); const session = new 
SessionNoConnection({ listeners, - ...carriedState + ...carriedState, }); session.log?.info( `session ${session.id} transition from Connecting to NoConnection`, { ...session.loggingMetadata, - tags: ["state-transition"] - } + tags: ['state-transition'], + }, ); return session; }, @@ -3749,37 +3919,38 @@ var SessionStateGraph = { oldSession._handleStateExit(); const session = new SessionNoConnection({ listeners, - ...carriedState + ...carriedState, }); session.log?.info( `session ${session.id} transition from Handshaking to NoConnection`, { ...session.loggingMetadata, - tags: ["state-transition"] - } + tags: ['state-transition'], + }, ); return session; }, ConnectedToNoConnection: (oldSession, listeners) => { const carriedState = inheritSharedSession(oldSession); - const graceExpiryTime = Date.now() + oldSession.options.sessionDisconnectGraceMs; + const graceExpiryTime = + Date.now() + oldSession.options.sessionDisconnectGraceMs; oldSession.conn.close(); oldSession._handleStateExit(); const session = new SessionNoConnection({ listeners, graceExpiryTime, - ...carriedState + ...carriedState, }); session.log?.info( `session ${session.id} transition from Connected to NoConnection`, { ...session.loggingMetadata, - tags: ["state-transition"] - } + tags: ['state-transition'], + }, ); return session; - } - } + }, + }, }; var transitions = SessionStateGraph.transition; var ClientSessionStateGraph = { @@ -3802,14 +3973,14 @@ var ClientSessionStateGraph = { // Handshaking -> NoConnection: connection closed or handshake timeout HandshakingToNoConnection: transitions.HandshakingToNoConnection, // Connected -> NoConnection: connection closed - ConnectedToNoConnection: transitions.ConnectedToNoConnection + ConnectedToNoConnection: transitions.ConnectedToNoConnection, // destroy/close paths // NoConnection -> x: grace period elapsed // BackingOff -> x: grace period elapsed // Connecting -> x: grace period elapsed // Handshaking -> x: grace period elapsed or invalid handshake 
message or handshake rejection // Connected -> x: grace period elapsed or invalid message - } + }, }; var ServerSessionStateGraph = { entrypoint: SessionStateGraph.entrypoints.WaitingForHandshake, @@ -3819,10 +3990,10 @@ var ServerSessionStateGraph = { WaitingForHandshakeToConnected: transitions.WaitingForHandshakeToConnected, // disconnect paths // Connected -> NoConnection: connection closed - ConnectedToNoConnection: transitions.ConnectedToNoConnection + ConnectedToNoConnection: transitions.ConnectedToNoConnection, // destroy/close paths // WaitingForHandshake -> x: handshake timeout elapsed or invalid handshake message or handshake rejection or connection closed - } + }, }; // transport/transport.ts @@ -3855,12 +4026,12 @@ var Transport = class { this.options = { ...defaultTransportOptions, ...providedOptions }; this.eventDispatcher = new EventDispatcher(); this.clientId = clientId; - this.status = "open"; + this.status = 'open'; this.sessions = /* @__PURE__ */ new Map(); this.tracer = getTracer(); } bindLogger(fn, level) { - if (typeof fn === "function") { + if (typeof fn === 'function') { this.log = createLogProxy(new BaseLogger(fn, level)); return; } @@ -3872,8 +4043,8 @@ var Transport = class { * @param message The received message. */ handleMsg(message) { - if (this.getStatus() !== "open") return; - this.eventDispatcher.dispatchEvent("message", message); + if (this.getStatus() !== 'open') return; + this.eventDispatcher.dispatchEvent('message', message); } /** * Adds a listener to this transport. @@ -3892,7 +4063,7 @@ var Transport = class { this.eventDispatcher.removeEventListener(type, handler); } protocolError(message) { - this.eventDispatcher.dispatchEvent("protocolError", message); + this.eventDispatcher.dispatchEvent('protocolError', message); } /** * Default close implementation for transports. You should override this in the downstream @@ -3900,13 +4071,13 @@ var Transport = class { * Closes the transport. 
Any messages sent while the transport is closed will be silently discarded. */ close() { - this.status = "closed"; + this.status = 'closed'; const sessions = Array.from(this.sessions.values()); for (const session of sessions) { this.deleteSession(session); } - this.eventDispatcher.dispatchEvent("transportStatus", { - status: this.status + this.eventDispatcher.dispatchEvent('transportStatus', { + status: this.status, }); this.eventDispatcher.removeAllListeners(); this.log?.info(`manually closed transport`, { clientId: this.clientId }); @@ -3921,18 +4092,18 @@ var Transport = class { const msg = `attempt to create session for ${session.to} but active session (${activeSession.id}) already exists`; this.log?.error(msg, { ...session.loggingMetadata, - tags: ["invariant-violation"] + tags: ['invariant-violation'], }); throw new Error(msg); } this.sessions.set(session.to, session); - this.eventDispatcher.dispatchEvent("sessionStatus", { - status: "created", - session + this.eventDispatcher.dispatchEvent('sessionStatus', { + status: 'created', + session, }); - this.eventDispatcher.dispatchEvent("sessionTransition", { + this.eventDispatcher.dispatchEvent('sessionTransition', { state: session.state, - id: session.id + id: session.id, }); } updateSession(session) { @@ -3941,7 +4112,7 @@ var Transport = class { const msg = `attempt to transition session for ${session.to} but no active session exists`; this.log?.error(msg, { ...session.loggingMetadata, - tags: ["invariant-violation"] + tags: ['invariant-violation'], }); throw new Error(msg); } @@ -3949,66 +4120,69 @@ var Transport = class { const msg = `attempt to transition active session for ${session.to} but active session (${activeSession.id}) is different from handle (${session.id})`; this.log?.error(msg, { ...session.loggingMetadata, - tags: ["invariant-violation"] + tags: ['invariant-violation'], }); throw new Error(msg); } this.sessions.set(session.to, session); - this.eventDispatcher.dispatchEvent("sessionTransition", 
{ + this.eventDispatcher.dispatchEvent('sessionTransition', { state: session.state, - id: session.id + id: session.id, }); } deleteSession(session, options) { if (session._isConsumed) return; const loggingMetadata = session.loggingMetadata; if (loggingMetadata.tags && options?.unhealthy) { - loggingMetadata.tags.push("unhealthy-session"); + loggingMetadata.tags.push('unhealthy-session'); } session.log?.info(`closing session ${session.id}`, loggingMetadata); - this.eventDispatcher.dispatchEvent("sessionStatus", { - status: "closing", - session + this.eventDispatcher.dispatchEvent('sessionStatus', { + status: 'closing', + session, }); const to = session.to; session.close(); this.sessions.delete(to); - this.eventDispatcher.dispatchEvent("sessionStatus", { - status: "closed", - session: { id: session.id, to } + this.eventDispatcher.dispatchEvent('sessionStatus', { + status: 'closed', + session: { id: session.id, to }, }); } // common listeners onSessionGracePeriodElapsed(session) { this.log?.info( `session to ${session.to} grace period elapsed, closing`, - session.loggingMetadata + session.loggingMetadata, ); this.deleteSession(session); } onConnectingFailed(session) { - const noConnectionSession = SessionStateGraph.transition.ConnectingToNoConnection(session, { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - } - }); + const noConnectionSession = + SessionStateGraph.transition.ConnectingToNoConnection(session, { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + }, + }); this.updateSession(noConnectionSession); return noConnectionSession; } onConnClosed(session) { let noConnectionSession; - if (session.state === "Handshaking" /* Handshaking */) { - noConnectionSession = SessionStateGraph.transition.HandshakingToNoConnection(session, { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - } - }); + if (session.state === 
'Handshaking' /* Handshaking */) { + noConnectionSession = + SessionStateGraph.transition.HandshakingToNoConnection(session, { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + }, + }); } else { - noConnectionSession = SessionStateGraph.transition.ConnectedToNoConnection(session, { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - } - }); + noConnectionSession = + SessionStateGraph.transition.ConnectedToNoConnection(session, { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + }, + }); } this.updateSession(noConnectionSession); return noConnectionSession; @@ -4022,20 +4196,20 @@ var Transport = class { * onto a session object is not recommended. */ getSessionBoundSendFn(to, sessionId) { - if (this.getStatus() !== "open") { - throw new Error("cannot get a bound send function on a closed transport"); + if (this.getStatus() !== 'open') { + throw new Error('cannot get a bound send function on a closed transport'); } return (msg) => { const session = this.sessions.get(to); if (!session) { throw new Error( - `session scope for ${sessionId} has ended (close), can't send` + `session scope for ${sessionId} has ended (close), can't send`, ); } const sameSession = session.id === sessionId; if (!sameSession || session._isConsumed) { throw new Error( - `session scope for ${sessionId} has ended (transition), can't send` + `session scope for ${sessionId} has ended (transition), can't send`, ); } const res = session.send(msg); @@ -4048,7 +4222,7 @@ var Transport = class { }; // transport/server.ts -import { Value as Value3 } from "@sinclair/typebox/value"; +import { Value as Value3 } from '@sinclair/typebox/value'; var ServerTransport = class extends Transport { /** * The options for this transport. 
@@ -4069,11 +4243,11 @@ var ServerTransport = class extends Transport { this.sessions = /* @__PURE__ */ new Map(); this.options = { ...defaultServerTransportOptions, - ...providedOptions + ...providedOptions, }; this.log?.info(`initiated server transport`, { clientId: this.clientId, - protocolVersion: currentProtocolVersion + protocolVersion: currentProtocolVersion, }); } extendHandshake(options) { @@ -4088,10 +4262,10 @@ var ServerTransport = class extends Transport { super.deleteSession(session, options); } handleConnection(conn) { - if (this.getStatus() !== "open") return; + if (this.getStatus() !== 'open') return; this.log?.info(`new incoming connection`, { ...conn.loggingMetadata, - clientId: this.clientId + clientId: this.clientId, }); let receivedHandshake = false; const pendingSession = ServerSessionStateGraph.entrypoint( @@ -4101,7 +4275,7 @@ var ServerTransport = class extends Transport { onConnectionClosed: () => { this.log?.warn( `connection from unknown closed before handshake finished`, - pendingSession.loggingMetadata + pendingSession.loggingMetadata, ); this.deletePendingSession(pendingSession); }, @@ -4109,14 +4283,14 @@ var ServerTransport = class extends Transport { const errorString = coerceErrorString(err); this.log?.warn( `connection from unknown errored before handshake finished: ${errorString}`, - pendingSession.loggingMetadata + pendingSession.loggingMetadata, ); this.deletePendingSession(pendingSession); }, onHandshakeTimeout: () => { this.log?.warn( `connection from unknown timed out before handshake finished`, - pendingSession.loggingMetadata + pendingSession.loggingMetadata, ); this.deletePendingSession(pendingSession); }, @@ -4127,8 +4301,8 @@ var ServerTransport = class extends Transport { { ...pendingSession.loggingMetadata, connectedTo: msg.from, - transportMessage: msg - } + transportMessage: msg, + }, ); this.deletePendingSession(pendingSession); return; @@ -4139,26 +4313,26 @@ var ServerTransport = class extends Transport { 
onInvalidHandshake: (reason, code) => { this.log?.error( `invalid handshake: ${reason}`, - pendingSession.loggingMetadata + pendingSession.loggingMetadata, ); this.deletePendingSession(pendingSession); this.protocolError({ type: ProtocolError.HandshakeFailed, code, - message: reason + message: reason, }); - } + }, }, this.options, this.tracer, - this.log + this.log, ); this.pendingSessions.add(pendingSession); } rejectHandshakeRequest(session, to, reason, code, metadata) { session.conn.telemetry?.span.setStatus({ code: SpanStatusCode.ERROR, - message: reason + message: reason, }); this.log?.warn(reason, metadata); const responseMsg = handshakeResponseMessage({ @@ -4167,18 +4341,18 @@ var ServerTransport = class extends Transport { status: { ok: false, code, - reason - } + reason, + }, }); const res = session.sendHandshake(responseMsg); if (!res.ok) { this.log?.error(`failed to send handshake response: ${res.reason}`, { ...session.loggingMetadata, - transportMessage: responseMsg + transportMessage: responseMsg, }); this.protocolError({ type: ProtocolError.MessageSendFailure, - message: res.reason + message: res.reason, }); this.deletePendingSession(session); return; @@ -4186,7 +4360,7 @@ var ServerTransport = class extends Transport { this.protocolError({ type: ProtocolError.HandshakeFailed, code, - message: reason + message: reason, }); this.deletePendingSession(session); } @@ -4195,16 +4369,16 @@ var ServerTransport = class extends Transport { this.rejectHandshakeRequest( session, msg.from, - "received invalid handshake request", - "MALFORMED_HANDSHAKE", + 'received invalid handshake request', + 'MALFORMED_HANDSHAKE', { ...session.loggingMetadata, transportMessage: msg, connectedTo: msg.from, validationErrors: [ - ...Value3.Errors(ControlMessageHandshakeRequestSchema, msg.payload) - ] - } + ...Value3.Errors(ControlMessageHandshakeRequestSchema, msg.payload), + ], + }, ); return; } @@ -4214,71 +4388,81 @@ var ServerTransport = class extends Transport { session, 
msg.from, `expected protocol version oneof [${acceptedProtocolVersions.toString()}], got ${gotVersion}`, - "PROTOCOL_VERSION_MISMATCH", + 'PROTOCOL_VERSION_MISMATCH', { ...session.loggingMetadata, connectedTo: msg.from, - transportMessage: msg - } + transportMessage: msg, + }, ); return; } let parsedMetadata = {}; if (this.handshakeExtensions) { - if (!Value3.Check(this.handshakeExtensions.schema, msg.payload.metadata)) { + if ( + !Value3.Check(this.handshakeExtensions.schema, msg.payload.metadata) + ) { this.rejectHandshakeRequest( session, msg.from, - "received malformed handshake metadata", - "MALFORMED_HANDSHAKE_META", + 'received malformed handshake metadata', + 'MALFORMED_HANDSHAKE_META', { ...session.loggingMetadata, connectedTo: msg.from, validationErrors: [ ...Value3.Errors( this.handshakeExtensions.schema, - msg.payload.metadata - ) - ] - } + msg.payload.metadata, + ), + ], + }, ); return; } const previousParsedMetadata = this.sessionHandshakeMetadata.get( - msg.from - ); - const parsedMetadataOrFailureCode = await this.handshakeExtensions.validate( - msg.payload.metadata, - previousParsedMetadata + msg.from, ); + const parsedMetadataOrFailureCode = + await this.handshakeExtensions.validate( + msg.payload.metadata, + previousParsedMetadata, + ); if (session._isConsumed) { return; } - if (Value3.Check( - HandshakeErrorCustomHandlerFatalResponseCodes, - parsedMetadataOrFailureCode - )) { + if ( + Value3.Check( + HandshakeErrorCustomHandlerFatalResponseCodes, + parsedMetadataOrFailureCode, + ) + ) { this.rejectHandshakeRequest( session, msg.from, - "rejected by handshake handler", + 'rejected by handshake handler', parsedMetadataOrFailureCode, { ...session.loggingMetadata, connectedTo: msg.from, - clientId: this.clientId - } + clientId: this.clientId, + }, ); return; } parsedMetadata = parsedMetadataOrFailureCode; } - let connectCase = "new session"; - const clientNextExpectedSeq = msg.payload.expectedSessionState.nextExpectedSeq; + let connectCase = 'new 
session'; + const clientNextExpectedSeq = + msg.payload.expectedSessionState.nextExpectedSeq; const clientNextSentSeq = msg.payload.expectedSessionState.nextSentSeq; let oldSession = this.sessions.get(msg.from); - if (this.options.enableTransparentSessionReconnects && oldSession && oldSession.id === msg.payload.sessionId) { - connectCase = "transparent reconnection"; + if ( + this.options.enableTransparentSessionReconnects && + oldSession && + oldSession.id === msg.payload.sessionId + ) { + connectCase = 'transparent reconnection'; const ourNextSeq = oldSession.nextSeq(); const ourAck = oldSession.ack; if (clientNextSentSeq > ourAck) { @@ -4286,12 +4470,12 @@ var ServerTransport = class extends Transport { session, msg.from, `client is in the future: server wanted next message to be ${ourAck} but client would have sent ${clientNextSentSeq}`, - "SESSION_STATE_MISMATCH", + 'SESSION_STATE_MISMATCH', { ...session.loggingMetadata, connectedTo: msg.from, - transportMessage: msg - } + transportMessage: msg, + }, ); return; } @@ -4300,53 +4484,56 @@ var ServerTransport = class extends Transport { session, msg.from, `server is in the future: client wanted next message to be ${clientNextExpectedSeq} but server would have sent ${ourNextSeq}`, - "SESSION_STATE_MISMATCH", + 'SESSION_STATE_MISMATCH', { ...session.loggingMetadata, connectedTo: msg.from, - transportMessage: msg - } + transportMessage: msg, + }, ); return; } - if (oldSession.state !== "NoConnection" /* NoConnection */) { - const noConnectionSession = ServerSessionStateGraph.transition.ConnectedToNoConnection( - oldSession, - { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - } - } - ); + if (oldSession.state !== 'NoConnection' /* NoConnection */) { + const noConnectionSession = + ServerSessionStateGraph.transition.ConnectedToNoConnection( + oldSession, + { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + }, + }, + ); 
oldSession = noConnectionSession; this.updateSession(oldSession); } } else if (oldSession) { - connectCase = "hard reconnection"; + connectCase = 'hard reconnection'; this.log?.info( `client is reconnecting to a new session (${msg.payload.sessionId}) with an old session (${oldSession.id}) already existing, closing old session`, { ...session.loggingMetadata, connectedTo: msg.from, - sessionId: msg.payload.sessionId - } + sessionId: msg.payload.sessionId, + }, ); this.deleteSession(oldSession); oldSession = void 0; } if (!oldSession && (clientNextSentSeq > 0 || clientNextExpectedSeq > 0)) { - connectCase = "unknown session"; - const rejectionMessage = this.options.enableTransparentSessionReconnects ? `client is trying to reconnect to a session the server don't know about: ${msg.payload.sessionId}` : `client is attempting a transparent reconnect to a session but the server does not support it: ${msg.payload.sessionId}`; + connectCase = 'unknown session'; + const rejectionMessage = this.options.enableTransparentSessionReconnects + ? 
`client is trying to reconnect to a session the server don't know about: ${msg.payload.sessionId}` + : `client is attempting a transparent reconnect to a session but the server does not support it: ${msg.payload.sessionId}`; this.rejectHandshakeRequest( session, msg.from, rejectionMessage, - "SESSION_STATE_MISMATCH", + 'SESSION_STATE_MISMATCH', { ...session.loggingMetadata, connectedTo: msg.from, - transportMessage: msg - } + transportMessage: msg, + }, ); return; } @@ -4355,81 +4542,82 @@ var ServerTransport = class extends Transport { `handshake from ${msg.from} ok (${connectCase}), responding with handshake success`, { ...session.loggingMetadata, - connectedTo: msg.from - } + connectedTo: msg.from, + }, ); const responseMsg = handshakeResponseMessage({ from: this.clientId, to: msg.from, status: { ok: true, - sessionId - } + sessionId, + }, }); const res = session.sendHandshake(responseMsg); if (!res.ok) { this.log?.error(`failed to send handshake response: ${res.reason}`, { ...session.loggingMetadata, - transportMessage: responseMsg + transportMessage: responseMsg, }); this.protocolError({ type: ProtocolError.MessageSendFailure, - message: res.reason + message: res.reason, }); this.deletePendingSession(session); return; } this.pendingSessions.delete(session); - const connectedSession = ServerSessionStateGraph.transition.WaitingForHandshakeToConnected( - session, - // by this point oldSession is either no connection or we dont have an old session - oldSession, - sessionId, - msg.from, - msg.tracing, - { - onConnectionErrored: (err) => { - const errStr = coerceErrorString(err); - this.log?.warn( - `connection to ${connectedSession.to} errored: ${errStr}`, - connectedSession.loggingMetadata - ); - }, - onConnectionClosed: () => { - this.log?.info( - `connection to ${connectedSession.to} closed`, - connectedSession.loggingMetadata - ); - this.onConnClosed(connectedSession); - }, - onMessage: (msg2) => { - this.handleMsg(msg2); - }, - onInvalidMessage: (reason) => { 
- this.log?.error(`invalid message: ${reason}`, { - ...connectedSession.loggingMetadata, - transportMessage: msg - }); - this.protocolError({ - type: ProtocolError.InvalidMessage, - message: reason - }); - this.deleteSession(connectedSession, { unhealthy: true }); + const connectedSession = + ServerSessionStateGraph.transition.WaitingForHandshakeToConnected( + session, + // by this point oldSession is either no connection or we dont have an old session + oldSession, + sessionId, + msg.from, + msg.tracing, + { + onConnectionErrored: (err) => { + const errStr = coerceErrorString(err); + this.log?.warn( + `connection to ${connectedSession.to} errored: ${errStr}`, + connectedSession.loggingMetadata, + ); + }, + onConnectionClosed: () => { + this.log?.info( + `connection to ${connectedSession.to} closed`, + connectedSession.loggingMetadata, + ); + this.onConnClosed(connectedSession); + }, + onMessage: (msg2) => { + this.handleMsg(msg2); + }, + onInvalidMessage: (reason) => { + this.log?.error(`invalid message: ${reason}`, { + ...connectedSession.loggingMetadata, + transportMessage: msg, + }); + this.protocolError({ + type: ProtocolError.InvalidMessage, + message: reason, + }); + this.deleteSession(connectedSession, { unhealthy: true }); + }, + onMessageSendFailure: (msg2, reason) => { + this.log?.error(`failed to send message: ${reason}`, { + ...connectedSession.loggingMetadata, + transportMessage: msg2, + }); + this.protocolError({ + type: ProtocolError.MessageSendFailure, + message: reason, + }); + this.deleteSession(connectedSession, { unhealthy: true }); + }, }, - onMessageSendFailure: (msg2, reason) => { - this.log?.error(`failed to send message: ${reason}`, { - ...connectedSession.loggingMetadata, - transportMessage: msg2 - }); - this.protocolError({ - type: ProtocolError.MessageSendFailure, - message: reason - }); - this.deleteSession(connectedSession, { unhealthy: true }); - } - }, - gotVersion - ); + gotVersion, + ); const bufferSendRes = 
connectedSession.sendBufferedMessages(); if (!bufferSendRes.ok) { return; @@ -4448,7 +4636,7 @@ var ServerTransport = class extends Transport { function cleanHeaders(headers) { const cleanedHeaders = {}; for (const [key, value] of Object.entries(headers)) { - if (!key.startsWith("sec-") && value) { + if (!key.startsWith('sec-') && value) { const cleanedValue = Array.isArray(value) ? value[0] : value; cleanedHeaders[key] = cleanedValue; } @@ -4460,22 +4648,22 @@ var WebSocketServerTransport = class extends ServerTransport { constructor(wss, clientId, providedOptions) { super(clientId, providedOptions); this.wss = wss; - this.wss.on("connection", this.connectionHandler); + this.wss.on('connection', this.connectionHandler); } connectionHandler = (ws, req) => { const conn = new WebSocketConnection(ws, { - headers: cleanHeaders(req.headersDistinct) + headers: cleanHeaders(req.headersDistinct), }); this.handleConnection(conn); }; close() { super.close(); - this.wss.off("connection", this.connectionHandler); + this.wss.off('connection', this.connectionHandler); } }; // python-client/tests/test_server_handshake.ts -import { Type as Type6 } from "@sinclair/typebox"; +import { Type as Type6 } from '@sinclair/typebox'; var ServiceSchema = createServiceSchema(); var HandshakeTestServiceSchema = ServiceSchema.define({ echo: Procedure.rpc({ @@ -4484,47 +4672,44 @@ var HandshakeTestServiceSchema = ServiceSchema.define({ responseError: Type6.Never(), async handler({ reqInit }) { return Ok({ response: reqInit.msg }); - } - }) + }, + }), }); var services = { - test: HandshakeTestServiceSchema + test: HandshakeTestServiceSchema, }; var handshakeSchema = Type6.Object({ token: Type6.String() }); async function main() { const httpServer = http.createServer(); const port = await new Promise((resolve, reject) => { - httpServer.listen(0, "127.0.0.1", () => { + httpServer.listen(0, '127.0.0.1', () => { const addr = httpServer.address(); - if (typeof addr === "object" && addr) 
resolve(addr.port); + if (typeof addr === 'object' && addr) resolve(addr.port); else reject(new Error("couldn't get port")); }); }); const wss = new WebSocketServer({ server: httpServer }); - const serverTransport = new WebSocketServerTransport( - wss, - "HANDSHAKE_SERVER" - ); + const serverTransport = new WebSocketServerTransport(wss, 'HANDSHAKE_SERVER'); const _server = createServer(serverTransport, services, { handshakeOptions: createServerHandshakeOptions( handshakeSchema, (metadata) => { - if (metadata.token !== "valid-token") { - return "REJECTED_BY_CUSTOM_HANDLER"; + if (metadata.token !== 'valid-token') { + return 'REJECTED_BY_CUSTOM_HANDLER'; } return {}; - } - ) + }, + ), }); process.stdout.write(`RIVER_PORT=${port} `); - process.on("SIGTERM", () => { + process.on('SIGTERM', () => { void _server.close().then(() => { httpServer.close(); process.exit(0); }); }); - process.on("SIGINT", () => { + process.on('SIGINT', () => { void _server.close().then(() => { httpServer.close(); process.exit(0); @@ -4532,6 +4717,6 @@ async function main() { }); } main().catch((err) => { - console.error("Failed to start handshake test server:", err); + console.error('Failed to start handshake test server:', err); process.exit(1); }); From f5647c4f9b379246165644d57bae6a67d7c3396f Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 17:54:08 -0800 Subject: [PATCH 10/29] fix --- .prettierignore | 3 +- python-client/.coverage | Bin 53248 -> 0 bytes python-client/.gitignore | 5 +- python-client/river/client.py | 16 +- python-client/river/codegen/schema.py | 17 +- python-client/river/streams.py | 2 + python-client/river/transport.py | 6 +- python-client/tests/test_equivalence.py | 43 + python-client/tests/test_server_handshake.mjs | 2065 ++++++++--------- python-client/tests/test_session.py | 211 ++ 10 files changed, 1233 insertions(+), 1135 deletions(-) delete mode 100644 python-client/.coverage diff --git a/.prettierignore b/.prettierignore index 83de37ed..11d5e660 
100644 --- a/.prettierignore +++ b/.prettierignore @@ -2,8 +2,7 @@ node_modules python-client/.venv python-client/.pytest_cache -python-client/tests/test_server.mjs -python-client/tests/extract_test_schema.mjs +python-client/tests/*.mjs python-client/tests/test_schema.json python-client/tests/generated .codex-review-tmp diff --git a/python-client/.coverage b/python-client/.coverage deleted file mode 100644 index fafba2ef5d198605b691eaf9fdf502241e1aba0e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 53248 zcmeI4Yiu0V702)F>}%HE9jBIATq}4`n>ud%h)LBJk`g=Mn1~2Mn?!_`ZpQ1eJ!W^t znVC&&D%)8nBM%eT?z!{W zwG$g*r;$bYkGy+l?!D*SbAI>Sd-v{m=X(!r)g4XgHLbkrDC<}i69m?-D2y?QUP*cd zM~qIw!3Cwltn-CVCD!tj$)voKRYo6X@(+_=mYXa4D*st=xboZ5jtV{2B(+ch4iEqV z5CDOjhCuP5%2>L#Ru~^~)b5<-SZY?Yy!Yfi-|g(!-l=Tw*tE4%@$!__35Aa4W~D>1 z%zetBW+}aTPE&NFN6)H`ZuBWmzZTRk*jf)?vnY>NEncz3mFH&}y@#Y6t&da;T6$i! zhLy*(;U>3&-^=g)8WHsBfVzu7Wm}opOmWZXRtA!^u z*C*A=(qF|KZz!Ye%9dVIlr^m$Zhg0<8rgo$Zc@~ehgp;C9CAEwEi>Qhm`YAJTsI9{ zcXZQGv>`29aI~Iz9c<#zf`j1A_J(xr<__)lB>lE#(J2ehBk5Q7g?q_3!wj&N1$omX z?AmM;^4)}dppYNrokJ2{!;ZO-C&d;>bTjjwO@mRXPUU+hk z8z>x4rdxOH(up)DXiC`&gbpuXIBxhTnHwa06yCTzmb7Reo7+a$OLUTb-t+!xg`B1UB4_R}sW~qHzi(l$Z3(Fly z6gq8EG~HRhQ0SCGAk$VlO%!XRv2K1axua{}_PvXT`IF_!d5sKc3;PVoHMdmocbIL>Ds)gXQ zn?I;J{mQkr^K3G_AreimuUW`&ZbGJwzef*NgUki-_7+SF~uwc*-TX=7Q3J&w0$6v4bGVE!kxt|YC-i|kkkf72)gVLrUdX>&Vsb9)xtU!OuATi-u z^J`{$*%RWL;HQ z4zdSnmZpzLvxAERpT&@0dM)r7(d+hZ4_#2p|A3TeY4 zL*hy~__vU?Rf1So>;DhHw_4iPwR|H%UWMdyOg<-H_*^;&qd@=!KmY_l00ck)1V8`; zKmY_l00c?|D#evT@HIdvA=XL$w*dV9e?{`EOg=6b<&M-$>W$QoQ=Q4F9mn*9UON0 zO`|!R(>25K^Ri|SJ%h$*&1CYbZe%j_i}ub0DUz3}$ook*so5-%niWe`qvdtS(Ja!_ z6~8{ebVGMszwVBaB4M!x*&02>z%?$DD6^4$ClA(4$O)&MWiNP43{t&58l&tytn$M64FBv&!{b$LAX zX7azuRmqo=MryOXNoJ`hzEHy)&Vm34fB*=900@8p2!H?xfB<9ng^SgT-rml$|F75_ zF3L+*RNDV1y28a3OIB0f|4Y~DS7{R&-xw|mi#Djd|Br1DM;0v}?Ej-n5f1kMk&bXN zviPQF?f=6|^S!kH7r!HpE!u$6{y#*mi_&%M|MC1kOmH9o0w4eaAOHd&00JNY0w4ea 
zAaGL?5CxGX`Tc)Eo?`Te0|Y<-1V8`;KmY_l00ck)1V8`;K;R}MAc{$e-~Ugfo@Vl8 z`DOWt{BwC!-X`Ccx{`W{GI4+a2!H?xfB*=900@8p2!H?xfWZHoz^X)8khVrAvWbf) zPMn!OI`i7anPc^5&wlOpgSXt)appnk;9s8WVyP`sM39PK3Xi`hoPO{8Q<16jA5VNd z)K_)p&1oqPYqzlQ$!gWVT@ed|jTUyt3X9FU@nMfV>Po}Z}MDehu_D*o(bXj`rD z%bK75=Fhjssl|K6fn85zj$YcaW83i~t;eQ6I6D$;xpXLhwDN=BPk(c5jH-0UHXRpE ze>i$a{PN)wcCuyac;p`g-**nKIr0qK|E&1pbbS587vB2&g%?I|d2OaCN)nGGLMx9= zz5B}8NAJGQ^ylu6d? z)E;L08EdCU@<+o|l8Qf4EmFEVGNENpy{-LX*JG#O_{!lQb%Z#CP>3M38-~#$GFEl$ zQ8xN3HuV4-oxV%BT*Ll7dO~DtU-@Ku^1uTVm-nYnK0Y})F|+QSmCuebwmTLT!je?= zNr>P7Pspz_`HK7>`7QZP`Mmt9{I+~neqTOCML0kJ1V8`;KmY_l00ck)1V8`;KmY`m znE?N)L5L>YBrLg!7 None: + nonlocal clean_close try: send_fn( PartialTransportMessage( @@ -310,7 +311,20 @@ def write_cb(raw_value: Any) -> None: ) ) except RuntimeError: - pass + # Session is gone — push disconnect error and tear down + clean_close = False + try: + res_readable._push_value( + err_result( + UNEXPECTED_DISCONNECT_CODE, + "send failed: session closed", + ) + ) + except RuntimeError: + pass + close_readable() + if req_writable.is_writable(): + req_writable._closed = True def close_cb() -> None: nonlocal clean_close diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index 9045ae33..d61568a2 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -85,12 +85,21 @@ class SchemaIR: # --------------------------------------------------------------------------- +def _sanitize_identifier(s: str) -> str: + """Replace characters illegal in Python identifiers with underscores.""" + # Replace dashes, spaces, and other non-alnum/non-underscore chars + s = re.sub(r"[^a-zA-Z0-9_]", "_", s) + # Strip leading underscores/digits so the result is a valid identifier + s = re.sub(r"^[_0-9]+", "", s) + return s or "unnamed" + + def _to_pascal_case(s: str) -> str: """Convert a camelCase, snake_case, or space-separated string to PascalCase.""" + s = _sanitize_identifier(s) # Handle 
snake_case or space-separated - if "_" in s or " " in s: - # Split on underscores and spaces - words = re.split(r"[_ ]+", s) + if "_" in s: + words = re.split(r"_+", s) return "".join(word.capitalize() for word in words if word) # camelCase → PascalCase: just capitalize first letter if s: @@ -100,6 +109,7 @@ def _to_pascal_case(s: str) -> str: def _to_snake_case(s: str) -> str: """Convert camelCase to snake_case.""" + s = _sanitize_identifier(s) result = re.sub(r"([A-Z])", r"_\1", s).lower() result = result.lstrip("_") if keyword.iskeyword(result): @@ -109,6 +119,7 @@ def _to_snake_case(s: str) -> str: def _safe_field_name(name: str) -> str: """Ensure a field name is a valid Python identifier.""" + name = _sanitize_identifier(name) if keyword.iskeyword(name): return name + "_" return name diff --git a/python-client/river/streams.py b/python-client/river/streams.py index ae01c309..b1dc29e3 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -31,6 +31,8 @@ def _push_value(self, value: T) -> None: """Push a value into the readable stream (internal use).""" if self._closed: raise RuntimeError("Cannot push to a closed readable") + if self._broken: + return # Discard values after break to prevent unbounded buffering self._queue.append(value) self._notify_waiters() diff --git a/python-client/river/transport.py b/python-client/river/transport.py index 973153b3..f17a06c0 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -330,7 +330,9 @@ async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: self._delete_session(to) self._try_reconnecting(to) else: - # Fatal handshake error — do not retry + # Fatal handshake error — do not retry. + # Delete the session so pending procedures get + # UNEXPECTED_DISCONNECT via the sessionStatus "closing" event. 
self._events.dispatch( "protocolError", { @@ -339,7 +341,7 @@ async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: "code": code, }, ) - session.state = SessionState.NO_CONNECTION + self._delete_session(to) return # Check session ID match diff --git a/python-client/tests/test_equivalence.py b/python-client/tests/test_equivalence.py index 55607a54..7eb43897 100644 --- a/python-client/tests/test_equivalence.py +++ b/python-client/tests/test_equivalence.py @@ -638,3 +638,46 @@ async def test_concurrent_rpc_ordering(self, codec_and_url: tuple[Codec, str]): assert sorted(returned_ns) == list(range(n)) finally: await cleanup(client) + + @pytest.mark.asyncio + async def test_ordering_preserved_across_disconnects( + self, codec_and_url: tuple[Codec, str] + ): + """50 RPCs with forced disconnects at msg 10 and 42 — all arrive. + + Mirrors the TS e2e.test.ts 'message order is preserved in the face + of disconnects' test. + """ + codec, url = codec_and_url + client = await make_client(url, codec) + try: + session = client.transport.sessions.get("SERVER") + # Warm up connection + warm = await client.rpc("ordering", "add", {"n": -1}) + assert warm["ok"] is True + session = client.transport.sessions.get("SERVER") + assert session is not None + + tasks = [] + for i in range(50): + # Force-close WS at specific points + if i == 10 or i == 42: + ws = session._ws + if ws is not None: + await ws.close() + + tasks.append(client.rpc("ordering", "add", {"n": i})) + + results = await asyncio.gather(*tasks) + for r in results: + assert r["ok"] is True + + # Verify all 50 messages arrived at the server + get_result = await client.rpc("ordering", "getAll", {}) + assert get_result["ok"] is True + msgs = get_result["payload"]["msgs"] + # All values 0-49 should be present (plus the -1 warmup) + for i in range(50): + assert i in msgs, f"message {i} missing from server" + finally: + await cleanup(client) diff --git a/python-client/tests/test_server_handshake.mjs 
b/python-client/tests/test_server_handshake.mjs index a1307bbb..1a113014 100644 --- a/python-client/tests/test_server_handshake.mjs +++ b/python-client/tests/test_server_handshake.mjs @@ -1,9 +1,9 @@ // python-client/tests/test_server_handshake.ts -import http from 'node:http'; -import { WebSocketServer } from 'ws'; +import http from "node:http"; +import { WebSocketServer } from "ws"; // node_modules/nanoid/index.js -import { webcrypto as crypto } from 'node:crypto'; +import { webcrypto as crypto } from "node:crypto"; var POOL_SIZE_MULTIPLIER = 128; var pool; var poolOffset; @@ -19,19 +19,19 @@ function fillPool(bytes) { poolOffset += bytes; } function random(bytes) { - fillPool((bytes |= 0)); + fillPool(bytes |= 0); return pool.subarray(poolOffset - bytes, poolOffset); } function customRandom(alphabet2, defaultSize, getRandom) { - let mask = (2 << (31 - Math.clz32((alphabet2.length - 1) | 1))) - 1; - let step = Math.ceil((1.6 * mask * defaultSize) / alphabet2.length); + let mask = (2 << 31 - Math.clz32(alphabet2.length - 1 | 1)) - 1; + let step = Math.ceil(1.6 * mask * defaultSize / alphabet2.length); return (size = defaultSize) => { - let id = ''; + let id = ""; while (true) { let bytes = getRandom(step); let i = step; while (i--) { - id += alphabet2[bytes[i] & mask] || ''; + id += alphabet2[bytes[i] & mask] || ""; if (id.length >= size) return id; } } @@ -43,7 +43,7 @@ function customAlphabet(alphabet2, size = 21) { // transport/id.ts var alphabet = customAlphabet( - '1234567890abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVXYZ', + "1234567890abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVXYZ" ); var generateId = () => alphabet(12); @@ -60,7 +60,7 @@ var Connection = class { const spanContext = this.telemetry.span.spanContext(); metadata.telemetry = { traceId: spanContext.traceId, - spanId: spanContext.spanId, + spanId: spanContext.spanId }; } return metadata; @@ -143,7 +143,7 @@ var WebSocketConnection = class extends Connection { super(); this.ws = ws; 
this.extras = extras; - this.ws.binaryType = 'arraybuffer'; + this.ws.binaryType = "arraybuffer"; let didError = false; this.ws.onerror = () => { didError = true; @@ -173,10 +173,10 @@ var WebSocketConnection = class extends Connection { }; // node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js -var _globalThis = typeof globalThis === 'object' ? globalThis : global; +var _globalThis = typeof globalThis === "object" ? globalThis : global; // node_modules/@opentelemetry/api/build/esm/version.js -var VERSION = '1.8.0'; +var VERSION = "1.8.0"; // node_modules/@opentelemetry/api/build/esm/internal/semver.js var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/; @@ -185,7 +185,7 @@ function _makeCompatibilityCheck(ownVersion) { var rejectedVersions = /* @__PURE__ */ new Set(); var myVersionMatch = ownVersion.match(re); if (!myVersionMatch) { - return function () { + return function() { return false; }; } @@ -193,7 +193,7 @@ function _makeCompatibilityCheck(ownVersion) { major: +myVersionMatch[1], minor: +myVersionMatch[2], patch: +myVersionMatch[3], - prerelease: myVersionMatch[4], + prerelease: myVersionMatch[4] }; if (ownVersionParsed.prerelease != null) { return function isExactmatch(globalVersion) { @@ -223,7 +223,7 @@ function _makeCompatibilityCheck(ownVersion) { major: +globalVersionMatch[1], minor: +globalVersionMatch[2], patch: +globalVersionMatch[3], - prerelease: globalVersionMatch[4], + prerelease: globalVersionMatch[4] }; if (globalVersionParsed.prerelease != null) { return _reject(globalVersion); @@ -232,10 +232,7 @@ function _makeCompatibilityCheck(ownVersion) { return _reject(globalVersion); } if (ownVersionParsed.major === 0) { - if ( - ownVersionParsed.minor === globalVersionParsed.minor && - ownVersionParsed.patch <= globalVersionParsed.patch - ) { + if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) { return _accept(globalVersion); } return _reject(globalVersion); @@ -249,70 +246,41 @@ 
function _makeCompatibilityCheck(ownVersion) { var isCompatible = _makeCompatibilityCheck(VERSION); // node_modules/@opentelemetry/api/build/esm/internal/global-utils.js -var major = VERSION.split('.')[0]; -var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for('opentelemetry.js.api.' + major); +var major = VERSION.split(".")[0]; +var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major); var _global = _globalThis; function registerGlobal(type, instance, diag2, allowOverride) { var _a; if (allowOverride === void 0) { allowOverride = false; } - var api = (_global[GLOBAL_OPENTELEMETRY_API_KEY] = - (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 - ? _a - : { - version: VERSION, - }); + var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : { + version: VERSION + }; if (!allowOverride && api[type]) { - var err = new Error( - '@opentelemetry/api: Attempted duplicate registration of API: ' + type, - ); + var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type); diag2.error(err.stack || err.message); return false; } if (api.version !== VERSION) { - var err = new Error( - '@opentelemetry/api: Registration of version v' + - api.version + - ' for ' + - type + - ' does not match previously registered API v' + - VERSION, - ); + var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION); diag2.error(err.stack || err.message); return false; } api[type] = instance; - diag2.debug( - '@opentelemetry/api: Registered a global for ' + - type + - ' v' + - VERSION + - '.', - ); + diag2.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + "."); return true; } function getGlobal(type) { var _a, _b; - var globalVersion = - (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 - ? 
void 0 - : _a.version; + var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version; if (!globalVersion || !isCompatible(globalVersion)) { return; } - return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 - ? void 0 - : _b[type]; + return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? void 0 : _b[type]; } function unregisterGlobal(type, diag2) { - diag2.debug( - '@opentelemetry/api: Unregistering a global for ' + - type + - ' v' + - VERSION + - '.', - ); + diag2.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + "."); var api = _global[GLOBAL_OPENTELEMETRY_API_KEY]; if (api) { delete api[type]; @@ -320,81 +288,78 @@ function unregisterGlobal(type, diag2) { } // node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js -var __read = function (o, n) { - var m = typeof Symbol === 'function' && o[Symbol.iterator]; +var __read = function(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), - r, - ar = [], - e; + var i = m.call(o), r, ar = [], e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i['return'])) m.call(i); + if (r && !r.done && (m = i["return"])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __spreadArray = function (to, from, pack) { - if (pack || arguments.length === 2) - for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; - } +var __spreadArray = function(to, from, pack) { + if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; } + } return to.concat(ar || Array.prototype.slice.call(from)); }; 
-var DiagComponentLogger = +var DiagComponentLogger = ( /** @class */ - (function () { + function() { function DiagComponentLogger2(props) { - this._namespace = props.namespace || 'DiagComponentLogger'; + this._namespace = props.namespace || "DiagComponentLogger"; } - DiagComponentLogger2.prototype.debug = function () { + DiagComponentLogger2.prototype.debug = function() { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy('debug', this._namespace, args); + return logProxy("debug", this._namespace, args); }; - DiagComponentLogger2.prototype.error = function () { + DiagComponentLogger2.prototype.error = function() { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy('error', this._namespace, args); + return logProxy("error", this._namespace, args); }; - DiagComponentLogger2.prototype.info = function () { + DiagComponentLogger2.prototype.info = function() { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy('info', this._namespace, args); + return logProxy("info", this._namespace, args); }; - DiagComponentLogger2.prototype.warn = function () { + DiagComponentLogger2.prototype.warn = function() { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy('warn', this._namespace, args); + return logProxy("warn", this._namespace, args); }; - DiagComponentLogger2.prototype.verbose = function () { + DiagComponentLogger2.prototype.verbose = function() { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - return logProxy('verbose', this._namespace, args); + return logProxy("verbose", this._namespace, args); }; return DiagComponentLogger2; - })(); + }() +); function logProxy(funcName, namespace, args) { - var logger = getGlobal('diag'); + var logger = getGlobal("diag"); if (!logger) { return; } @@ -404,14 +369,14 
@@ function logProxy(funcName, namespace, args) { // node_modules/@opentelemetry/api/build/esm/diag/types.js var DiagLogLevel; -(function (DiagLogLevel2) { - DiagLogLevel2[(DiagLogLevel2['NONE'] = 0)] = 'NONE'; - DiagLogLevel2[(DiagLogLevel2['ERROR'] = 30)] = 'ERROR'; - DiagLogLevel2[(DiagLogLevel2['WARN'] = 50)] = 'WARN'; - DiagLogLevel2[(DiagLogLevel2['INFO'] = 60)] = 'INFO'; - DiagLogLevel2[(DiagLogLevel2['DEBUG'] = 70)] = 'DEBUG'; - DiagLogLevel2[(DiagLogLevel2['VERBOSE'] = 80)] = 'VERBOSE'; - DiagLogLevel2[(DiagLogLevel2['ALL'] = 9999)] = 'ALL'; +(function(DiagLogLevel2) { + DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE"; + DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR"; + DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN"; + DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO"; + DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG"; + DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE"; + DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL"; })(DiagLogLevel || (DiagLogLevel = {})); // node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js @@ -424,201 +389,172 @@ function createLogLevelDiagLogger(maxLevel, logger) { logger = logger || {}; function _filterFunc(funcName, theLevel) { var theFunc = logger[funcName]; - if (typeof theFunc === 'function' && maxLevel >= theLevel) { + if (typeof theFunc === "function" && maxLevel >= theLevel) { return theFunc.bind(logger); } - return function () {}; + return function() { + }; } return { - error: _filterFunc('error', DiagLogLevel.ERROR), - warn: _filterFunc('warn', DiagLogLevel.WARN), - info: _filterFunc('info', DiagLogLevel.INFO), - debug: _filterFunc('debug', DiagLogLevel.DEBUG), - verbose: _filterFunc('verbose', DiagLogLevel.VERBOSE), + error: _filterFunc("error", DiagLogLevel.ERROR), + warn: _filterFunc("warn", DiagLogLevel.WARN), + info: _filterFunc("info", DiagLogLevel.INFO), + debug: _filterFunc("debug", DiagLogLevel.DEBUG), + verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE) }; } 
// node_modules/@opentelemetry/api/build/esm/api/diag.js -var __read2 = function (o, n) { - var m = typeof Symbol === 'function' && o[Symbol.iterator]; +var __read2 = function(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), - r, - ar = [], - e; + var i = m.call(o), r, ar = [], e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i['return'])) m.call(i); + if (r && !r.done && (m = i["return"])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __spreadArray2 = function (to, from, pack) { - if (pack || arguments.length === 2) - for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; - } +var __spreadArray2 = function(to, from, pack) { + if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; } + } return to.concat(ar || Array.prototype.slice.call(from)); }; -var API_NAME = 'diag'; -var DiagAPI = +var API_NAME = "diag"; +var DiagAPI = ( /** @class */ - (function () { + function() { function DiagAPI2() { function _logProxy(funcName) { - return function () { + return function() { var args = []; for (var _i = 0; _i < arguments.length; _i++) { args[_i] = arguments[_i]; } - var logger = getGlobal('diag'); - if (!logger) return; - return logger[funcName].apply( - logger, - __spreadArray2([], __read2(args), false), - ); + var logger = getGlobal("diag"); + if (!logger) + return; + return logger[funcName].apply(logger, __spreadArray2([], __read2(args), false)); }; } var self = this; - var setLogger = function (logger, optionsOrLogLevel) { + var setLogger = function(logger, optionsOrLogLevel) { var _a, _b, _c; if (optionsOrLogLevel === void 0) { optionsOrLogLevel = 
{ logLevel: DiagLogLevel.INFO }; } if (logger === self) { - var err = new Error( - 'Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation', - ); - self.error( - (_a = err.stack) !== null && _a !== void 0 ? _a : err.message, - ); + var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation"); + self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message); return false; } - if (typeof optionsOrLogLevel === 'number') { + if (typeof optionsOrLogLevel === "number") { optionsOrLogLevel = { - logLevel: optionsOrLogLevel, + logLevel: optionsOrLogLevel }; } - var oldLogger = getGlobal('diag'); - var newLogger = createLogLevelDiagLogger( - (_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 - ? _b - : DiagLogLevel.INFO, - logger, - ); + var oldLogger = getGlobal("diag"); + var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger); if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) { - var stack = - (_c = new Error().stack) !== null && _c !== void 0 - ? _c - : ''; - oldLogger.warn('Current logger will be overwritten from ' + stack); - newLogger.warn( - 'Current logger will overwrite one already registered from ' + - stack, - ); + var stack = (_c = new Error().stack) !== null && _c !== void 0 ? 
_c : ""; + oldLogger.warn("Current logger will be overwritten from " + stack); + newLogger.warn("Current logger will overwrite one already registered from " + stack); } - return registerGlobal('diag', newLogger, self, true); + return registerGlobal("diag", newLogger, self, true); }; self.setLogger = setLogger; - self.disable = function () { + self.disable = function() { unregisterGlobal(API_NAME, self); }; - self.createComponentLogger = function (options) { + self.createComponentLogger = function(options) { return new DiagComponentLogger(options); }; - self.verbose = _logProxy('verbose'); - self.debug = _logProxy('debug'); - self.info = _logProxy('info'); - self.warn = _logProxy('warn'); - self.error = _logProxy('error'); + self.verbose = _logProxy("verbose"); + self.debug = _logProxy("debug"); + self.info = _logProxy("info"); + self.warn = _logProxy("warn"); + self.error = _logProxy("error"); } - DiagAPI2.instance = function () { + DiagAPI2.instance = function() { if (!this._instance) { this._instance = new DiagAPI2(); } return this._instance; }; return DiagAPI2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/baggage/internal/baggage-impl.js -var __read3 = function (o, n) { - var m = typeof Symbol === 'function' && o[Symbol.iterator]; +var __read3 = function(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), - r, - ar = [], - e; + var i = m.call(o), r, ar = [], e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i['return'])) m.call(i); + if (r && !r.done && (m = i["return"])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __values = function (o) { - var s = typeof Symbol === 'function' && Symbol.iterator, - m = s && o[s], - i = 0; +var __values = function(o) { + var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0; if (m) return 
m.call(o); - if (o && typeof o.length === 'number') - return { - next: function () { - if (o && i >= o.length) o = void 0; - return { value: o && o[i++], done: !o }; - }, - }; - throw new TypeError( - s ? 'Object is not iterable.' : 'Symbol.iterator is not defined.', - ); + if (o && typeof o.length === "number") return { + next: function() { + if (o && i >= o.length) o = void 0; + return { value: o && o[i++], done: !o }; + } + }; + throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined."); }; -var BaggageImpl = +var BaggageImpl = ( /** @class */ - (function () { + function() { function BaggageImpl2(entries) { this._entries = entries ? new Map(entries) : /* @__PURE__ */ new Map(); } - BaggageImpl2.prototype.getEntry = function (key) { + BaggageImpl2.prototype.getEntry = function(key) { var entry = this._entries.get(key); if (!entry) { return void 0; } return Object.assign({}, entry); }; - BaggageImpl2.prototype.getAllEntries = function () { - return Array.from(this._entries.entries()).map(function (_a) { - var _b = __read3(_a, 2), - k = _b[0], - v = _b[1]; + BaggageImpl2.prototype.getAllEntries = function() { + return Array.from(this._entries.entries()).map(function(_a) { + var _b = __read3(_a, 2), k = _b[0], v = _b[1]; return [k, v]; }); }; - BaggageImpl2.prototype.setEntry = function (key, entry) { + BaggageImpl2.prototype.setEntry = function(key, entry) { var newBaggage = new BaggageImpl2(this._entries); newBaggage._entries.set(key, entry); return newBaggage; }; - BaggageImpl2.prototype.removeEntry = function (key) { + BaggageImpl2.prototype.removeEntry = function(key) { var newBaggage = new BaggageImpl2(this._entries); newBaggage._entries.delete(key); return newBaggage; }; - BaggageImpl2.prototype.removeEntries = function () { + BaggageImpl2.prototype.removeEntries = function() { var e_1, _a; var keys = []; for (var _i = 0; _i < arguments.length; _i++) { @@ -626,11 +562,7 @@ var BaggageImpl = } var newBaggage = new 
BaggageImpl2(this._entries); try { - for ( - var keys_1 = __values(keys), keys_1_1 = keys_1.next(); - !keys_1_1.done; - keys_1_1 = keys_1.next() - ) { + for (var keys_1 = __values(keys), keys_1_1 = keys_1.next(); !keys_1_1.done; keys_1_1 = keys_1.next()) { var key = keys_1_1.value; newBaggage._entries.delete(key); } @@ -638,19 +570,19 @@ var BaggageImpl = e_1 = { error: e_1_1 }; } finally { try { - if (keys_1_1 && !keys_1_1.done && (_a = keys_1.return)) - _a.call(keys_1); + if (keys_1_1 && !keys_1_1.done && (_a = keys_1.return)) _a.call(keys_1); } finally { if (e_1) throw e_1.error; } } return newBaggage; }; - BaggageImpl2.prototype.clear = function () { + BaggageImpl2.prototype.clear = function() { return new BaggageImpl2(); }; return BaggageImpl2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/baggage/utils.js var diag = DiagAPI.instance(); @@ -665,243 +597,235 @@ function createBaggage(entries) { function createContextKey(description) { return Symbol.for(description); } -var BaseContext = +var BaseContext = ( /** @class */ - /* @__PURE__ */ (function () { + /* @__PURE__ */ function() { function BaseContext2(parentContext) { var self = this; - self._currentContext = parentContext - ? new Map(parentContext) - : /* @__PURE__ */ new Map(); - self.getValue = function (key) { + self._currentContext = parentContext ? 
new Map(parentContext) : /* @__PURE__ */ new Map(); + self.getValue = function(key) { return self._currentContext.get(key); }; - self.setValue = function (key, value) { + self.setValue = function(key, value) { var context2 = new BaseContext2(self._currentContext); context2._currentContext.set(key, value); return context2; }; - self.deleteValue = function (key) { + self.deleteValue = function(key) { var context2 = new BaseContext2(self._currentContext); context2._currentContext.delete(key); return context2; }; } return BaseContext2; - })(); + }() +); var ROOT_CONTEXT = new BaseContext(); // node_modules/@opentelemetry/api/build/esm/propagation/TextMapPropagator.js var defaultTextMapGetter = { - get: function (carrier, key) { + get: function(carrier, key) { if (carrier == null) { return void 0; } return carrier[key]; }, - keys: function (carrier) { + keys: function(carrier) { if (carrier == null) { return []; } return Object.keys(carrier); - }, + } }; var defaultTextMapSetter = { - set: function (carrier, key, value) { + set: function(carrier, key, value) { if (carrier == null) { return; } carrier[key] = value; - }, + } }; // node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js -var __read4 = function (o, n) { - var m = typeof Symbol === 'function' && o[Symbol.iterator]; +var __read4 = function(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), - r, - ar = [], - e; + var i = m.call(o), r, ar = [], e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i['return'])) m.call(i); + if (r && !r.done && (m = i["return"])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __spreadArray3 = function (to, from, pack) { - if (pack || arguments.length === 2) - for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = 
Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; - } +var __spreadArray3 = function(to, from, pack) { + if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; } + } return to.concat(ar || Array.prototype.slice.call(from)); }; -var NoopContextManager = +var NoopContextManager = ( /** @class */ - (function () { - function NoopContextManager2() {} - NoopContextManager2.prototype.active = function () { + function() { + function NoopContextManager2() { + } + NoopContextManager2.prototype.active = function() { return ROOT_CONTEXT; }; - NoopContextManager2.prototype.with = function (_context, fn, thisArg) { + NoopContextManager2.prototype.with = function(_context, fn, thisArg) { var args = []; for (var _i = 3; _i < arguments.length; _i++) { args[_i - 3] = arguments[_i]; } return fn.call.apply(fn, __spreadArray3([thisArg], __read4(args), false)); }; - NoopContextManager2.prototype.bind = function (_context, target) { + NoopContextManager2.prototype.bind = function(_context, target) { return target; }; - NoopContextManager2.prototype.enable = function () { + NoopContextManager2.prototype.enable = function() { return this; }; - NoopContextManager2.prototype.disable = function () { + NoopContextManager2.prototype.disable = function() { return this; }; return NoopContextManager2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/api/context.js -var __read5 = function (o, n) { - var m = typeof Symbol === 'function' && o[Symbol.iterator]; +var __read5 = function(o, n) { + var m = typeof Symbol === "function" && o[Symbol.iterator]; if (!m) return o; - var i = m.call(o), - r, - ar = [], - e; + var i = m.call(o), r, ar = [], e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = { error }; } finally { try { - if (r && !r.done && (m = i['return'])) m.call(i); + if (r && 
!r.done && (m = i["return"])) m.call(i); } finally { if (e) throw e.error; } } return ar; }; -var __spreadArray4 = function (to, from, pack) { - if (pack || arguments.length === 2) - for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; - } +var __spreadArray4 = function(to, from, pack) { + if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { + if (ar || !(i in from)) { + if (!ar) ar = Array.prototype.slice.call(from, 0, i); + ar[i] = from[i]; } + } return to.concat(ar || Array.prototype.slice.call(from)); }; -var API_NAME2 = 'context'; +var API_NAME2 = "context"; var NOOP_CONTEXT_MANAGER = new NoopContextManager(); -var ContextAPI = +var ContextAPI = ( /** @class */ - (function () { - function ContextAPI2() {} - ContextAPI2.getInstance = function () { + function() { + function ContextAPI2() { + } + ContextAPI2.getInstance = function() { if (!this._instance) { this._instance = new ContextAPI2(); } return this._instance; }; - ContextAPI2.prototype.setGlobalContextManager = function (contextManager) { + ContextAPI2.prototype.setGlobalContextManager = function(contextManager) { return registerGlobal(API_NAME2, contextManager, DiagAPI.instance()); }; - ContextAPI2.prototype.active = function () { + ContextAPI2.prototype.active = function() { return this._getContextManager().active(); }; - ContextAPI2.prototype.with = function (context2, fn, thisArg) { + ContextAPI2.prototype.with = function(context2, fn, thisArg) { var _a; var args = []; for (var _i = 3; _i < arguments.length; _i++) { args[_i - 3] = arguments[_i]; } - return (_a = this._getContextManager()).with.apply( - _a, - __spreadArray4([context2, fn, thisArg], __read5(args), false), - ); + return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read5(args), false)); }; - ContextAPI2.prototype.bind = function (context2, target) { + 
ContextAPI2.prototype.bind = function(context2, target) { return this._getContextManager().bind(context2, target); }; - ContextAPI2.prototype._getContextManager = function () { + ContextAPI2.prototype._getContextManager = function() { return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER; }; - ContextAPI2.prototype.disable = function () { + ContextAPI2.prototype.disable = function() { this._getContextManager().disable(); unregisterGlobal(API_NAME2, DiagAPI.instance()); }; return ContextAPI2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/trace/trace_flags.js var TraceFlags; -(function (TraceFlags2) { - TraceFlags2[(TraceFlags2['NONE'] = 0)] = 'NONE'; - TraceFlags2[(TraceFlags2['SAMPLED'] = 1)] = 'SAMPLED'; +(function(TraceFlags2) { + TraceFlags2[TraceFlags2["NONE"] = 0] = "NONE"; + TraceFlags2[TraceFlags2["SAMPLED"] = 1] = "SAMPLED"; })(TraceFlags || (TraceFlags = {})); // node_modules/@opentelemetry/api/build/esm/trace/invalid-span-constants.js -var INVALID_SPANID = '0000000000000000'; -var INVALID_TRACEID = '00000000000000000000000000000000'; +var INVALID_SPANID = "0000000000000000"; +var INVALID_TRACEID = "00000000000000000000000000000000"; var INVALID_SPAN_CONTEXT = { traceId: INVALID_TRACEID, spanId: INVALID_SPANID, - traceFlags: TraceFlags.NONE, + traceFlags: TraceFlags.NONE }; // node_modules/@opentelemetry/api/build/esm/trace/NonRecordingSpan.js -var NonRecordingSpan = +var NonRecordingSpan = ( /** @class */ - (function () { + function() { function NonRecordingSpan2(_spanContext) { if (_spanContext === void 0) { _spanContext = INVALID_SPAN_CONTEXT; } this._spanContext = _spanContext; } - NonRecordingSpan2.prototype.spanContext = function () { + NonRecordingSpan2.prototype.spanContext = function() { return this._spanContext; }; - NonRecordingSpan2.prototype.setAttribute = function (_key, _value) { + NonRecordingSpan2.prototype.setAttribute = function(_key, _value) { return this; }; - NonRecordingSpan2.prototype.setAttributes = function 
(_attributes) { + NonRecordingSpan2.prototype.setAttributes = function(_attributes) { return this; }; - NonRecordingSpan2.prototype.addEvent = function (_name, _attributes) { + NonRecordingSpan2.prototype.addEvent = function(_name, _attributes) { return this; }; - NonRecordingSpan2.prototype.setStatus = function (_status) { + NonRecordingSpan2.prototype.setStatus = function(_status) { return this; }; - NonRecordingSpan2.prototype.updateName = function (_name) { + NonRecordingSpan2.prototype.updateName = function(_name) { return this; }; - NonRecordingSpan2.prototype.end = function (_endTime) {}; - NonRecordingSpan2.prototype.isRecording = function () { + NonRecordingSpan2.prototype.end = function(_endTime) { + }; + NonRecordingSpan2.prototype.isRecording = function() { return false; }; - NonRecordingSpan2.prototype.recordException = function ( - _exception, - _time, - ) {}; + NonRecordingSpan2.prototype.recordException = function(_exception, _time) { + }; return NonRecordingSpan2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/trace/context-utils.js -var SPAN_KEY = createContextKey('OpenTelemetry Context Key SPAN'); +var SPAN_KEY = createContextKey("OpenTelemetry Context Key SPAN"); function getSpan(context2) { return context2.getValue(SPAN_KEY) || void 0; } @@ -919,9 +843,7 @@ function setSpanContext(context2, spanContext) { } function getSpanContext(context2) { var _a; - return (_a = getSpan(context2)) === null || _a === void 0 - ? void 0 - : _a.spanContext(); + return (_a = getSpan(context2)) === null || _a === void 0 ? 
void 0 : _a.spanContext(); } // node_modules/@opentelemetry/api/build/esm/trace/spancontext-utils.js @@ -934,9 +856,7 @@ function isValidSpanId(spanId) { return VALID_SPANID_REGEX.test(spanId) && spanId !== INVALID_SPANID; } function isSpanContextValid(spanContext) { - return ( - isValidTraceId(spanContext.traceId) && isValidSpanId(spanContext.spanId) - ); + return isValidTraceId(spanContext.traceId) && isValidSpanId(spanContext.spanId); } function wrapSpanContext(spanContext) { return new NonRecordingSpan(spanContext); @@ -944,31 +864,27 @@ function wrapSpanContext(spanContext) { // node_modules/@opentelemetry/api/build/esm/trace/NoopTracer.js var contextApi = ContextAPI.getInstance(); -var NoopTracer = +var NoopTracer = ( /** @class */ - (function () { - function NoopTracer2() {} - NoopTracer2.prototype.startSpan = function (name, options, context2) { + function() { + function NoopTracer2() { + } + NoopTracer2.prototype.startSpan = function(name, options, context2) { if (context2 === void 0) { context2 = contextApi.active(); } - var root = Boolean( - options === null || options === void 0 ? void 0 : options.root, - ); + var root = Boolean(options === null || options === void 0 ? void 0 : options.root); if (root) { return new NonRecordingSpan(); } var parentFromContext = context2 && getSpanContext(context2); - if ( - isSpanContext(parentFromContext) && - isSpanContextValid(parentFromContext) - ) { + if (isSpanContext(parentFromContext) && isSpanContextValid(parentFromContext)) { return new NonRecordingSpan(parentFromContext); } else { return new NonRecordingSpan(); } }; - NoopTracer2.prototype.startActiveSpan = function (name, arg2, arg3, arg4) { + NoopTracer2.prototype.startActiveSpan = function(name, arg2, arg3, arg4) { var opts; var ctx; var fn; @@ -984,55 +900,41 @@ var NoopTracer = ctx = arg3; fn = arg4; } - var parentContext = - ctx !== null && ctx !== void 0 ? ctx : contextApi.active(); + var parentContext = ctx !== null && ctx !== void 0 ? 
ctx : contextApi.active(); var span = this.startSpan(name, opts, parentContext); var contextWithSpanSet = setSpan(parentContext, span); return contextApi.with(contextWithSpanSet, fn, void 0, span); }; return NoopTracer2; - })(); + }() +); function isSpanContext(spanContext) { - return ( - typeof spanContext === 'object' && - typeof spanContext['spanId'] === 'string' && - typeof spanContext['traceId'] === 'string' && - typeof spanContext['traceFlags'] === 'number' - ); + return typeof spanContext === "object" && typeof spanContext["spanId"] === "string" && typeof spanContext["traceId"] === "string" && typeof spanContext["traceFlags"] === "number"; } // node_modules/@opentelemetry/api/build/esm/trace/ProxyTracer.js var NOOP_TRACER = new NoopTracer(); -var ProxyTracer = +var ProxyTracer = ( /** @class */ - (function () { + function() { function ProxyTracer2(_provider, name, version2, options) { this._provider = _provider; this.name = name; this.version = version2; this.options = options; } - ProxyTracer2.prototype.startSpan = function (name, options, context2) { + ProxyTracer2.prototype.startSpan = function(name, options, context2) { return this._getTracer().startSpan(name, options, context2); }; - ProxyTracer2.prototype.startActiveSpan = function ( - _name, - _options, - _context, - _fn, - ) { + ProxyTracer2.prototype.startActiveSpan = function(_name, _options, _context, _fn) { var tracer = this._getTracer(); return Reflect.apply(tracer.startActiveSpan, tracer, arguments); }; - ProxyTracer2.prototype._getTracer = function () { + ProxyTracer2.prototype._getTracer = function() { if (this._delegate) { return this._delegate; } - var tracer = this._provider.getDelegateTracer( - this.name, - this.version, - this.options, - ); + var tracer = this._provider.getDelegateTracer(this.name, this.version, this.options); if (!tracer) { return NOOP_TRACER; } @@ -1040,100 +942,89 @@ var ProxyTracer = return this._delegate; }; return ProxyTracer2; - })(); + }() +); // 
node_modules/@opentelemetry/api/build/esm/trace/NoopTracerProvider.js -var NoopTracerProvider = +var NoopTracerProvider = ( /** @class */ - (function () { - function NoopTracerProvider2() {} - NoopTracerProvider2.prototype.getTracer = function ( - _name, - _version, - _options, - ) { + function() { + function NoopTracerProvider2() { + } + NoopTracerProvider2.prototype.getTracer = function(_name, _version, _options) { return new NoopTracer(); }; return NoopTracerProvider2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/trace/ProxyTracerProvider.js var NOOP_TRACER_PROVIDER = new NoopTracerProvider(); -var ProxyTracerProvider = +var ProxyTracerProvider = ( /** @class */ - (function () { - function ProxyTracerProvider2() {} - ProxyTracerProvider2.prototype.getTracer = function ( - name, - version2, - options, - ) { + function() { + function ProxyTracerProvider2() { + } + ProxyTracerProvider2.prototype.getTracer = function(name, version2, options) { var _a; - return (_a = this.getDelegateTracer(name, version2, options)) !== null && - _a !== void 0 - ? _a - : new ProxyTracer(this, name, version2, options); + return (_a = this.getDelegateTracer(name, version2, options)) !== null && _a !== void 0 ? _a : new ProxyTracer(this, name, version2, options); }; - ProxyTracerProvider2.prototype.getDelegate = function () { + ProxyTracerProvider2.prototype.getDelegate = function() { var _a; - return (_a = this._delegate) !== null && _a !== void 0 - ? _a - : NOOP_TRACER_PROVIDER; + return (_a = this._delegate) !== null && _a !== void 0 ? 
_a : NOOP_TRACER_PROVIDER; }; - ProxyTracerProvider2.prototype.setDelegate = function (delegate) { + ProxyTracerProvider2.prototype.setDelegate = function(delegate) { this._delegate = delegate; }; - ProxyTracerProvider2.prototype.getDelegateTracer = function ( - name, - version2, - options, - ) { + ProxyTracerProvider2.prototype.getDelegateTracer = function(name, version2, options) { var _a; - return (_a = this._delegate) === null || _a === void 0 - ? void 0 - : _a.getTracer(name, version2, options); + return (_a = this._delegate) === null || _a === void 0 ? void 0 : _a.getTracer(name, version2, options); }; return ProxyTracerProvider2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/trace/span_kind.js var SpanKind; -(function (SpanKind2) { - SpanKind2[(SpanKind2['INTERNAL'] = 0)] = 'INTERNAL'; - SpanKind2[(SpanKind2['SERVER'] = 1)] = 'SERVER'; - SpanKind2[(SpanKind2['CLIENT'] = 2)] = 'CLIENT'; - SpanKind2[(SpanKind2['PRODUCER'] = 3)] = 'PRODUCER'; - SpanKind2[(SpanKind2['CONSUMER'] = 4)] = 'CONSUMER'; +(function(SpanKind2) { + SpanKind2[SpanKind2["INTERNAL"] = 0] = "INTERNAL"; + SpanKind2[SpanKind2["SERVER"] = 1] = "SERVER"; + SpanKind2[SpanKind2["CLIENT"] = 2] = "CLIENT"; + SpanKind2[SpanKind2["PRODUCER"] = 3] = "PRODUCER"; + SpanKind2[SpanKind2["CONSUMER"] = 4] = "CONSUMER"; })(SpanKind || (SpanKind = {})); // node_modules/@opentelemetry/api/build/esm/trace/status.js var SpanStatusCode; -(function (SpanStatusCode2) { - SpanStatusCode2[(SpanStatusCode2['UNSET'] = 0)] = 'UNSET'; - SpanStatusCode2[(SpanStatusCode2['OK'] = 1)] = 'OK'; - SpanStatusCode2[(SpanStatusCode2['ERROR'] = 2)] = 'ERROR'; +(function(SpanStatusCode2) { + SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET"; + SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK"; + SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR"; })(SpanStatusCode || (SpanStatusCode = {})); // node_modules/@opentelemetry/api/build/esm/context-api.js var context = ContextAPI.getInstance(); // 
node_modules/@opentelemetry/api/build/esm/propagation/NoopTextMapPropagator.js -var NoopTextMapPropagator = +var NoopTextMapPropagator = ( /** @class */ - (function () { - function NoopTextMapPropagator2() {} - NoopTextMapPropagator2.prototype.inject = function (_context, _carrier) {}; - NoopTextMapPropagator2.prototype.extract = function (context2, _carrier) { + function() { + function NoopTextMapPropagator2() { + } + NoopTextMapPropagator2.prototype.inject = function(_context, _carrier) { + }; + NoopTextMapPropagator2.prototype.extract = function(context2, _carrier) { return context2; }; - NoopTextMapPropagator2.prototype.fields = function () { + NoopTextMapPropagator2.prototype.fields = function() { return []; }; return NoopTextMapPropagator2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/baggage/context-helpers.js -var BAGGAGE_KEY = createContextKey('OpenTelemetry Baggage Key'); +var BAGGAGE_KEY = createContextKey("OpenTelemetry Baggage Key"); function getBaggage(context2) { return context2.getValue(BAGGAGE_KEY) || void 0; } @@ -1148,11 +1039,11 @@ function deleteBaggage(context2) { } // node_modules/@opentelemetry/api/build/esm/api/propagation.js -var API_NAME3 = 'propagation'; +var API_NAME3 = "propagation"; var NOOP_TEXT_MAP_PROPAGATOR = new NoopTextMapPropagator(); -var PropagationAPI = +var PropagationAPI = ( /** @class */ - (function () { + function() { function PropagationAPI2() { this.createBaggage = createBaggage; this.getBaggage = getBaggage; @@ -1160,47 +1051,48 @@ var PropagationAPI = this.setBaggage = setBaggage; this.deleteBaggage = deleteBaggage; } - PropagationAPI2.getInstance = function () { + PropagationAPI2.getInstance = function() { if (!this._instance) { this._instance = new PropagationAPI2(); } return this._instance; }; - PropagationAPI2.prototype.setGlobalPropagator = function (propagator) { + PropagationAPI2.prototype.setGlobalPropagator = function(propagator) { return registerGlobal(API_NAME3, propagator, 
DiagAPI.instance()); }; - PropagationAPI2.prototype.inject = function (context2, carrier, setter) { + PropagationAPI2.prototype.inject = function(context2, carrier, setter) { if (setter === void 0) { setter = defaultTextMapSetter; } return this._getGlobalPropagator().inject(context2, carrier, setter); }; - PropagationAPI2.prototype.extract = function (context2, carrier, getter) { + PropagationAPI2.prototype.extract = function(context2, carrier, getter) { if (getter === void 0) { getter = defaultTextMapGetter; } return this._getGlobalPropagator().extract(context2, carrier, getter); }; - PropagationAPI2.prototype.fields = function () { + PropagationAPI2.prototype.fields = function() { return this._getGlobalPropagator().fields(); }; - PropagationAPI2.prototype.disable = function () { + PropagationAPI2.prototype.disable = function() { unregisterGlobal(API_NAME3, DiagAPI.instance()); }; - PropagationAPI2.prototype._getGlobalPropagator = function () { + PropagationAPI2.prototype._getGlobalPropagator = function() { return getGlobal(API_NAME3) || NOOP_TEXT_MAP_PROPAGATOR; }; return PropagationAPI2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/propagation-api.js var propagation = PropagationAPI.getInstance(); // node_modules/@opentelemetry/api/build/esm/api/trace.js -var API_NAME4 = 'trace'; -var TraceAPI = +var API_NAME4 = "trace"; +var TraceAPI = ( /** @class */ - (function () { + function() { function TraceAPI2() { this._proxyTracerProvider = new ProxyTracerProvider(); this.wrapSpanContext = wrapSpanContext; @@ -1212,73 +1104,69 @@ var TraceAPI = this.setSpan = setSpan; this.setSpanContext = setSpanContext; } - TraceAPI2.getInstance = function () { + TraceAPI2.getInstance = function() { if (!this._instance) { this._instance = new TraceAPI2(); } return this._instance; }; - TraceAPI2.prototype.setGlobalTracerProvider = function (provider) { - var success = registerGlobal( - API_NAME4, - this._proxyTracerProvider, - DiagAPI.instance(), - ); + 
TraceAPI2.prototype.setGlobalTracerProvider = function(provider) { + var success = registerGlobal(API_NAME4, this._proxyTracerProvider, DiagAPI.instance()); if (success) { this._proxyTracerProvider.setDelegate(provider); } return success; }; - TraceAPI2.prototype.getTracerProvider = function () { + TraceAPI2.prototype.getTracerProvider = function() { return getGlobal(API_NAME4) || this._proxyTracerProvider; }; - TraceAPI2.prototype.getTracer = function (name, version2) { + TraceAPI2.prototype.getTracer = function(name, version2) { return this.getTracerProvider().getTracer(name, version2); }; - TraceAPI2.prototype.disable = function () { + TraceAPI2.prototype.disable = function() { unregisterGlobal(API_NAME4, DiagAPI.instance()); this._proxyTracerProvider = new ProxyTracerProvider(); }; return TraceAPI2; - })(); + }() +); // node_modules/@opentelemetry/api/build/esm/trace-api.js var trace = TraceAPI.getInstance(); // transport/message.ts -import { Type } from '@sinclair/typebox'; -var TransportMessageSchema = (t) => - Type.Object({ - id: Type.String(), - from: Type.String(), - to: Type.String(), - seq: Type.Integer(), - ack: Type.Integer(), - serviceName: Type.Optional(Type.String()), - procedureName: Type.Optional(Type.String()), - streamId: Type.String(), - controlFlags: Type.Integer(), - tracing: Type.Optional( - Type.Object({ - traceparent: Type.String(), - tracestate: Type.String(), - }), - ), - payload: t, - }); +import { Type } from "@sinclair/typebox"; +var TransportMessageSchema = (t) => Type.Object({ + id: Type.String(), + from: Type.String(), + to: Type.String(), + seq: Type.Integer(), + ack: Type.Integer(), + serviceName: Type.Optional(Type.String()), + procedureName: Type.Optional(Type.String()), + streamId: Type.String(), + controlFlags: Type.Integer(), + tracing: Type.Optional( + Type.Object({ + traceparent: Type.String(), + tracestate: Type.String() + }) + ), + payload: t +}); var ControlMessageAckSchema = Type.Object({ - type: Type.Literal('ACK'), + 
type: Type.Literal("ACK") }); var ControlMessageCloseSchema = Type.Object({ - type: Type.Literal('CLOSE'), + type: Type.Literal("CLOSE") }); -var currentProtocolVersion = 'v2.0'; -var acceptedProtocolVersions = ['v1.1', currentProtocolVersion]; +var currentProtocolVersion = "v2.0"; +var acceptedProtocolVersions = ["v1.1", currentProtocolVersion]; function isAcceptedProtocolVersion(version2) { return acceptedProtocolVersions.includes(version2); } var ControlMessageHandshakeRequestSchema = Type.Object({ - type: Type.Literal('HANDSHAKE_REQ'), + type: Type.Literal("HANDSHAKE_REQ"), protocolVersion: Type.String(), sessionId: Type.String(), /** @@ -1289,54 +1177,60 @@ var ControlMessageHandshakeRequestSchema = Type.Object({ expectedSessionState: Type.Object({ // what the client expects the server to send next nextExpectedSeq: Type.Integer(), - nextSentSeq: Type.Integer(), + nextSentSeq: Type.Integer() }), - metadata: Type.Optional(Type.Unknown()), + metadata: Type.Optional(Type.Unknown()) }); var HandshakeErrorRetriableResponseCodes = Type.Union([ - Type.Literal('SESSION_STATE_MISMATCH'), + Type.Literal("SESSION_STATE_MISMATCH") ]); var HandshakeErrorCustomHandlerFatalResponseCodes = Type.Union([ // The custom validation handler rejected the handler because the client is unsupported. - Type.Literal('REJECTED_UNSUPPORTED_CLIENT'), + Type.Literal("REJECTED_UNSUPPORTED_CLIENT"), // The custom validation handler rejected the handshake. - Type.Literal('REJECTED_BY_CUSTOM_HANDLER'), + Type.Literal("REJECTED_BY_CUSTOM_HANDLER") ]); var HandshakeErrorFatalResponseCodes = Type.Union([ HandshakeErrorCustomHandlerFatalResponseCodes, // The ciient sent a handshake that doesn't comply with the extended handshake metadata. - Type.Literal('MALFORMED_HANDSHAKE_META'), + Type.Literal("MALFORMED_HANDSHAKE_META"), // The ciient sent a handshake that doesn't comply with ControlMessageHandshakeRequestSchema. 
- Type.Literal('MALFORMED_HANDSHAKE'), + Type.Literal("MALFORMED_HANDSHAKE"), // The client's protocol version does not match the server's. - Type.Literal('PROTOCOL_VERSION_MISMATCH'), + Type.Literal("PROTOCOL_VERSION_MISMATCH") ]); var HandshakeErrorResponseCodes = Type.Union([ HandshakeErrorRetriableResponseCodes, - HandshakeErrorFatalResponseCodes, + HandshakeErrorFatalResponseCodes ]); var ControlMessageHandshakeResponseSchema = Type.Object({ - type: Type.Literal('HANDSHAKE_RESP'), + type: Type.Literal("HANDSHAKE_RESP"), status: Type.Union([ Type.Object({ ok: Type.Literal(true), - sessionId: Type.String(), + sessionId: Type.String() }), Type.Object({ ok: Type.Literal(false), reason: Type.String(), - code: HandshakeErrorResponseCodes, - }), - ]), + code: HandshakeErrorResponseCodes + }) + ]) }); var ControlMessagePayloadSchema = Type.Union([ ControlMessageCloseSchema, ControlMessageAckSchema, ControlMessageHandshakeRequestSchema, - ControlMessageHandshakeResponseSchema, + ControlMessageHandshakeResponseSchema ]); -var OpaqueTransportMessageSchema = TransportMessageSchema(Type.Unknown()); -function handshakeResponseMessage({ from, to, status }) { +var OpaqueTransportMessageSchema = TransportMessageSchema( + Type.Unknown() +); +function handshakeResponseMessage({ + from, + to, + status +}) { return { id: generateId(), from, @@ -1346,9 +1240,9 @@ function handshakeResponseMessage({ from, to, status }) { streamId: generateId(), controlFlags: 0, payload: { - type: 'HANDSHAKE_RESP', - status, - }, + type: "HANDSHAKE_RESP", + status + } }; } function closeStreamMessage(streamId) { @@ -1356,36 +1250,36 @@ function closeStreamMessage(streamId) { streamId, controlFlags: 8 /* StreamClosedBit */, payload: { - type: 'CLOSE', - }, + type: "CLOSE" + } }; } function cancelMessage(streamId, payload) { return { streamId, controlFlags: 4 /* StreamCancelBit */, - payload, + payload }; } function isAck(controlFlag) { - return (controlFlag & 1) /* AckBit */ === 1 /* AckBit */; + 
return (controlFlag & 1 /* AckBit */) === 1 /* AckBit */; } function isStreamOpen(controlFlag) { return ( /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ - (controlFlag & 2) /* StreamOpenBit */ === 2 /* StreamOpenBit */ + (controlFlag & 2 /* StreamOpenBit */) === 2 /* StreamOpenBit */ ); } function isStreamClose(controlFlag) { return ( /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ - (controlFlag & 8) /* StreamClosedBit */ === 8 /* StreamClosedBit */ + (controlFlag & 8 /* StreamClosedBit */) === 8 /* StreamClosedBit */ ); } function isStreamCancel(controlFlag) { return ( /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ - (controlFlag & 4) /* StreamCancelBit */ === 4 /* StreamCancelBit */ + (controlFlag & 4 /* StreamCancelBit */) === 4 /* StreamCancelBit */ ); } @@ -1393,7 +1287,7 @@ function isStreamCancel(controlFlag) { var encoder = new TextEncoder(); var decoder = new TextDecoder(); function uint8ArrayToBase64(uint8Array) { - let binary = ''; + let binary = ""; uint8Array.forEach((byte) => { binary += String.fromCharCode(byte); }); @@ -1414,12 +1308,12 @@ var NaiveJsonCodec = { const val = this[key]; if (val instanceof Uint8Array) { return { $t: uint8ArrayToBase64(val) }; - } else if (typeof val === 'bigint') { + } else if (typeof val === "bigint") { return { $b: val.toString() }; } else { return val; } - }), + }) ); }, fromBuffer: (buff) => { @@ -1433,13 +1327,13 @@ var NaiveJsonCodec = { } else { return val; } - }, + } ); - if (typeof parsed !== 'object' || parsed === null) { - throw new Error('unpacked msg is not an object'); + if (typeof parsed !== "object" || parsed === null) { + throw new Error("unpacked msg is not an object"); } return parsed; - }, + } }; // transport/options.ts @@ -1450,7 +1344,7 @@ var defaultTransportOptions = { connectionTimeoutMs: 2e3, handshakeTimeoutMs: 1e3, enableTransparentSessionReconnects: true, - codec: NaiveJsonCodec, + codec: NaiveJsonCodec 
}; var defaultConnectionRetryOptions = { baseIntervalMs: 150, @@ -1458,14 +1352,14 @@ var defaultConnectionRetryOptions = { maxBackoffMs: 32e3, attemptBudgetCapacity: 5, budgetRestoreIntervalMs: 200, - isFatalConnectionError: () => false, + isFatalConnectionError: () => false }; var defaultClientTransportOptions = { ...defaultTransportOptions, - ...defaultConnectionRetryOptions, + ...defaultConnectionRetryOptions }; var defaultServerTransportOptions = { - ...defaultTransportOptions, + ...defaultTransportOptions }; // logging/log.ts @@ -1473,7 +1367,7 @@ var LoggingLevels = { debug: -1, info: 0, warn: 1, - error: 2, + error: 2 }; var cleanedLogFn = (log) => { return (msg, metadata) => { @@ -1482,7 +1376,7 @@ var cleanedLogFn = (log) => { if (span) { metadata.telemetry = { traceId: span.spanContext().traceId, - spanId: span.spanContext().spanId, + spanId: span.spanContext().spanId }; } } @@ -1498,28 +1392,28 @@ var cleanedLogFn = (log) => { var BaseLogger = class { minLevel; output; - constructor(output, minLevel = 'info') { + constructor(output, minLevel = "info") { this.minLevel = minLevel; this.output = output; } debug(msg, metadata) { if (LoggingLevels[this.minLevel] <= LoggingLevels.debug) { - this.output(msg, metadata ?? {}, 'debug'); + this.output(msg, metadata ?? {}, "debug"); } } info(msg, metadata) { if (LoggingLevels[this.minLevel] <= LoggingLevels.info) { - this.output(msg, metadata ?? {}, 'info'); + this.output(msg, metadata ?? {}, "info"); } } warn(msg, metadata) { if (LoggingLevels[this.minLevel] <= LoggingLevels.warn) { - this.output(msg, metadata ?? {}, 'warn'); + this.output(msg, metadata ?? {}, "warn"); } } error(msg, metadata) { if (LoggingLevels[this.minLevel] <= LoggingLevels.error) { - this.output(msg, metadata ?? {}, 'error'); + this.output(msg, metadata ?? 
{}, "error"); } } }; @@ -1527,16 +1421,16 @@ var createLogProxy = (log) => ({ debug: cleanedLogFn(log.debug.bind(log)), info: cleanedLogFn(log.info.bind(log)), warn: cleanedLogFn(log.warn.bind(log)), - error: cleanedLogFn(log.error.bind(log)), + error: cleanedLogFn(log.error.bind(log)) }); // transport/events.ts var ProtocolError = { - RetriesExceeded: 'conn_retry_exceeded', - HandshakeFailed: 'handshake_failed', - MessageOrderingViolated: 'message_ordering_violated', - InvalidMessage: 'invalid_message', - MessageSendFailure: 'message_send_failure', + RetriesExceeded: "conn_retry_exceeded", + HandshakeFailed: "handshake_failed", + MessageOrderingViolated: "message_ordering_violated", + InvalidMessage: "invalid_message", + MessageSendFailure: "message_send_failure" }; var EventDispatcher = class { eventListeners = {}; @@ -1592,16 +1486,16 @@ var StateMachineState = class { this._isConsumed = false; return new Proxy(this, { get(target, prop) { - if (prop === '_isConsumed' || prop === 'id' || prop === 'state') { + if (prop === "_isConsumed" || prop === "id" || prop === "state") { return Reflect.get(target, prop); } - if (prop === '_handleStateExit') { + if (prop === "_handleStateExit") { return () => { target._isConsumed = true; target._handleStateExit(); }; } - if (prop === '_handleClose') { + if (prop === "_handleClose") { return () => { target._isConsumed = true; target._handleStateExit(); @@ -1610,7 +1504,7 @@ var StateMachineState = class { } if (target._isConsumed) { throw new Error( - `${ERR_CONSUMED}: getting ${prop.toString()} on consumed state`, + `${ERR_CONSUMED}: getting ${prop.toString()} on consumed state` ); } return Reflect.get(target, prop); @@ -1618,11 +1512,11 @@ var StateMachineState = class { set(target, prop, value) { if (target._isConsumed) { throw new Error( - `${ERR_CONSUMED}: setting ${prop.toString()} on consumed state`, + `${ERR_CONSUMED}: setting ${prop.toString()} on consumed state` ); } return Reflect.set(target, prop, value); - }, + } 
}); } }; @@ -1669,7 +1563,7 @@ var IdentifiedSession = class extends CommonSession { telemetry, log, protocolVersion, - seqSent: messagesSent, + seqSent: messagesSent } = props; super(props); this.id = id; @@ -1686,13 +1580,13 @@ var IdentifiedSession = class extends CommonSession { const metadata = { clientId: this.from, connectedTo: this.to, - sessionId: this.id, + sessionId: this.id }; if (this.telemetry.span.isRecording()) { const spanContext = this.telemetry.span.spanContext(); metadata.telemetry = { traceId: spanContext.traceId, - spanId: spanContext.spanId, + spanId: spanContext.spanId }; } return metadata; @@ -1704,7 +1598,7 @@ var IdentifiedSession = class extends CommonSession { to: this.to, from: this.from, seq: this.seq, - ack: this.ack, + ack: this.ack }; this.seq++; return msg; @@ -1717,10 +1611,11 @@ var IdentifiedSession = class extends CommonSession { this.sendBuffer.push(constructedMsg); return { ok: true, - value: constructedMsg.id, + value: constructedMsg.id }; } - _handleStateExit() {} + _handleStateExit() { + } _handleClose() { this.sendBuffer.length = 0; this.telemetry.span.end(); @@ -1758,18 +1653,18 @@ function sendMessage(conn, codec, msg) { if (!sent) { return { ok: false, - reason: 'failed to send message', + reason: "failed to send message" }; } return { ok: true, - value: msg.id, + value: msg.id }; } // transport/sessionStateMachine/SessionConnecting.ts var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { - state = 'Connecting' /* Connecting */; + state = "Connecting" /* Connecting */; connPromise; listeners; connectionTimeout; @@ -1785,7 +1680,7 @@ var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { (err) => { if (this._isConsumed) return; this.listeners.onConnectionFailed(err); - }, + } ); this.connectionTimeout = setTimeout(() => { this.listeners.onConnectionTimeout(); @@ -1796,18 +1691,17 @@ var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { bestEffortClose() { const 
logger = this.log; const metadata = this.loggingMetadata; - this.connPromise - .then((conn) => { - conn.close(); - logger?.info( - 'connection eventually resolved but session has transitioned, closed connection', - { - ...metadata, - ...conn.loggingMetadata, - }, - ); - }) - .catch(() => {}); + this.connPromise.then((conn) => { + conn.close(); + logger?.info( + "connection eventually resolved but session has transitioned, closed connection", + { + ...metadata, + ...conn.loggingMetadata + } + ); + }).catch(() => { + }); } _handleStateExit() { super._handleStateExit(); @@ -1824,7 +1718,7 @@ var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { // transport/sessionStateMachine/SessionNoConnection.ts var SessionNoConnection = class extends IdentifiedSessionWithGracePeriod { - state = 'NoConnection' /* NoConnection */; + state = "NoConnection" /* NoConnection */; _handleClose() { super._handleClose(); } @@ -1834,22 +1728,24 @@ var SessionNoConnection = class extends IdentifiedSessionWithGracePeriod { }; // router/services.ts -import { Type as Type3, Kind as Kind2 } from '@sinclair/typebox'; +import { Type as Type3, Kind as Kind2 } from "@sinclair/typebox"; // router/errors.ts -import { Kind, Type as Type2 } from '@sinclair/typebox'; -var UNCAUGHT_ERROR_CODE = 'UNCAUGHT_ERROR'; -var UNEXPECTED_DISCONNECT_CODE = 'UNEXPECTED_DISCONNECT'; -var INVALID_REQUEST_CODE = 'INVALID_REQUEST'; -var CANCEL_CODE = 'CANCEL'; -var ErrResultSchema = (t) => - Type2.Object({ - ok: Type2.Literal(false), - payload: t, - }); +import { + Kind, + Type as Type2 +} from "@sinclair/typebox"; +var UNCAUGHT_ERROR_CODE = "UNCAUGHT_ERROR"; +var UNEXPECTED_DISCONNECT_CODE = "UNEXPECTED_DISCONNECT"; +var INVALID_REQUEST_CODE = "INVALID_REQUEST"; +var CANCEL_CODE = "CANCEL"; +var ErrResultSchema = (t) => Type2.Object({ + ok: Type2.Literal(false), + payload: t +}); var ValidationErrorDetails = Type2.Object({ path: Type2.String(), - message: Type2.String(), + message: Type2.String() }); 
var ValidationErrors = Type2.Array(ValidationErrorDetails); function castTypeboxValueErrors(errors) { @@ -1857,24 +1753,24 @@ function castTypeboxValueErrors(errors) { for (const error of errors) { result.push({ path: error.path, - message: error.message, + message: error.message }); } return result; } var CancelErrorSchema = Type2.Object({ code: Type2.Literal(CANCEL_CODE), - message: Type2.String(), + message: Type2.String() }); var CancelResultSchema = ErrResultSchema(CancelErrorSchema); var ReaderErrorSchema = Type2.Union([ Type2.Object({ code: Type2.Literal(UNCAUGHT_ERROR_CODE), - message: Type2.String(), + message: Type2.String() }), Type2.Object({ code: Type2.Literal(UNEXPECTED_DISCONNECT_CODE), - message: Type2.String(), + message: Type2.String() }), Type2.Object({ code: Type2.Literal(INVALID_REQUEST_CODE), @@ -1882,15 +1778,15 @@ var ReaderErrorSchema = Type2.Union([ extras: Type2.Optional( Type2.Object({ firstValidationErrors: Type2.Array(ValidationErrorDetails), - totalErrors: Type2.Number(), - }), - ), + totalErrors: Type2.Number() + }) + ) }), - CancelErrorSchema, + CancelErrorSchema ]); var ReaderErrorResultSchema = ErrResultSchema(ReaderErrorSchema); function isUnion(schema) { - return schema[Kind] === 'Union'; + return schema[Kind] === "Union"; } function flattenErrorType(errType) { if (!isUnion(errType)) { @@ -1993,12 +1889,9 @@ function createServiceSchema() { static define(configOrProcedures, maybeProcedures) { let config; let procedures; - if ( - 'initializeState' in configOrProcedures && - typeof configOrProcedures.initializeState === 'function' - ) { + if ("initializeState" in configOrProcedures && typeof configOrProcedures.initializeState === "function") { if (!maybeProcedures) { - throw new Error('Expected procedures to be defined'); + throw new Error("Expected procedures to be defined"); } config = configOrProcedures; procedures = maybeProcedures; @@ -2021,19 +1914,15 @@ function createServiceSchema() { output: Strict(procDef.responseData), 
errors: getSerializedProcErrors(procDef), // Only add `description` field if the type declares it. - ...('description' in procDef - ? { description: procDef.description } - : {}), + ..."description" in procDef ? { description: procDef.description } : {}, type: procDef.type, // Only add the `input` field if the type declares it. - ...('requestData' in procDef - ? { - input: Strict(procDef.requestData), - } - : {}), - }, - ]), - ), + ..."requestData" in procDef ? { + input: Strict(procDef.requestData) + } : {} + } + ]) + ) }; } // TODO remove once clients migrate to v2 @@ -2045,40 +1934,38 @@ function createServiceSchema() { serializeV1Compat() { return { procedures: Object.fromEntries( - Object.entries(this.procedures).map(([procName, procDef]) => { - if (procDef.type === 'rpc' || procDef.type === 'subscription') { + Object.entries(this.procedures).map( + ([procName, procDef]) => { + if (procDef.type === "rpc" || procDef.type === "subscription") { + return [ + procName, + { + // BACKWARDS COMPAT: map init to input for protocolv1 + // this is the only change needed to make it compatible. + input: Strict(procDef.requestInit), + output: Strict(procDef.responseData), + errors: getSerializedProcErrors(procDef), + // Only add `description` field if the type declares it. + ..."description" in procDef ? { description: procDef.description } : {}, + type: procDef.type + } + ]; + } return [ procName, { - // BACKWARDS COMPAT: map init to input for protocolv1 - // this is the only change needed to make it compatible. - input: Strict(procDef.requestInit), + init: Strict(procDef.requestInit), output: Strict(procDef.responseData), errors: getSerializedProcErrors(procDef), // Only add `description` field if the type declares it. - ...('description' in procDef - ? { description: procDef.description } - : {}), + ..."description" in procDef ? 
{ description: procDef.description } : {}, type: procDef.type, - }, + input: Strict(procDef.requestData) + } ]; } - return [ - procName, - { - init: Strict(procDef.requestInit), - output: Strict(procDef.responseData), - errors: getSerializedProcErrors(procDef), - // Only add `description` field if the type declares it. - ...('description' in procDef - ? { description: procDef.description } - : {}), - type: procDef.type, - input: Strict(procDef.requestData), - }, - ]; - }), - ), + ) + ) }; } /** @@ -2096,20 +1983,17 @@ function createServiceSchema() { return Object.freeze({ state, procedures: this.procedures, - [Symbol.asyncDispose]: dispose, + [Symbol.asyncDispose]: dispose }); } }; } function getSerializedProcErrors(procDef) { - if ( - !('responseError' in procDef) || - procDef.responseError[Kind2] === 'Never' - ) { + if (!("responseError" in procDef) || procDef.responseError[Kind2] === "Never") { return Strict(ReaderErrorSchema); } const withProtocolErrors = flattenErrorType( - Type3.Union([procDef.responseError, ReaderErrorSchema]), + Type3.Union([procDef.responseError, ReaderErrorSchema]) ); return Strict(withProtocolErrors); } @@ -2166,43 +2050,46 @@ var ServiceScaffold = class { * ``` */ finalize(procedures) { - return createServiceSchema().define(this.config, procedures); + return createServiceSchema().define( + this.config, + procedures + ); } }; // router/result.ts -import { Type as Type4 } from '@sinclair/typebox'; +import { Type as Type4 } from "@sinclair/typebox"; var AnyResultSchema = Type4.Union([ Type4.Object({ ok: Type4.Literal(false), payload: Type4.Object({ code: Type4.String(), message: Type4.String(), - extras: Type4.Optional(Type4.Unknown()), - }), + extras: Type4.Optional(Type4.Unknown()) + }) }), Type4.Object({ ok: Type4.Literal(true), - payload: Type4.Unknown(), - }), + payload: Type4.Unknown() + }) ]); function Ok(payload) { return { ok: true, - payload, + payload }; } function Err(error) { return { ok: false, - payload: error, + payload: 
error }; } // router/streams.ts var ReadableBrokenError = { - code: 'READABLE_BROKEN', - message: 'Readable was broken before it is fully consumed', + code: "READABLE_BROKEN", + message: "Readable was broken before it is fully consumed" }; function createPromiseWithResolvers() { let resolve; @@ -2216,7 +2103,7 @@ function createPromiseWithResolvers() { // @ts-expect-error promise callbacks are sync resolve, // @ts-expect-error promise callbacks are sync - reject, + reject }; } var ReadableImpl = class { @@ -2260,7 +2147,7 @@ var ReadableImpl = class { */ [Symbol.asyncIterator]() { if (this.locked) { - throw new TypeError('Readable is already locked'); + throw new TypeError("Readable is already locked"); } this.locked = true; let didSignalBreak = false; @@ -2269,21 +2156,21 @@ var ReadableImpl = class { if (didSignalBreak) { return { done: true, - value: void 0, + value: void 0 }; } while (this.queue.length === 0) { if (this.closed && !this.brokenWithValuesLeftToRead) { return { done: true, - value: void 0, + value: void 0 }; } if (this.broken) { didSignalBreak = true; return { done: false, - value: Err(ReadableBrokenError), + value: Err(ReadableBrokenError) }; } if (!this.next) { @@ -2298,7 +2185,7 @@ var ReadableImpl = class { return: async () => { this.break(); return { done: true, value: void 0 }; - }, + } }; } /** @@ -2344,7 +2231,7 @@ var ReadableImpl = class { return; } if (this.closed) { - throw new Error('Cannot push to closed Readable'); + throw new Error("Cannot push to closed Readable"); } this.queue.push(value); this.next?.resolve(); @@ -2355,7 +2242,7 @@ var ReadableImpl = class { */ _triggerClose() { if (this.closed) { - throw new Error('Unexpected closing multiple times'); + throw new Error("Unexpected closing multiple times"); } this.closed = true; this.next?.resolve(); @@ -2392,7 +2279,7 @@ var WritableImpl = class { } write(value) { if (this.closed) { - throw new Error('Cannot write to closed Writable'); + throw new Error("Cannot write to closed 
Writable"); } this.writeCb(value); } @@ -2420,21 +2307,21 @@ var WritableImpl = class { }; // router/procedures.ts -import { Type as Type5 } from '@sinclair/typebox'; +import { Type as Type5 } from "@sinclair/typebox"; function rpc({ requestInit, responseData, responseError = Type5.Never(), description, - handler, + handler }) { return { - ...(description ? { description } : {}), - type: 'rpc', + ...description ? { description } : {}, + type: "rpc", requestInit, responseData, responseError, - handler, + handler }; } function upload({ @@ -2443,16 +2330,16 @@ function upload({ responseData, responseError = Type5.Never(), description, - handler, + handler }) { return { - type: 'upload', - ...(description ? { description } : {}), + type: "upload", + ...description ? { description } : {}, requestInit, requestData, responseData, responseError, - handler, + handler }; } function subscription({ @@ -2460,15 +2347,15 @@ function subscription({ responseData, responseError = Type5.Never(), description, - handler, + handler }) { return { - type: 'subscription', - ...(description ? { description } : {}), + type: "subscription", + ...description ? { description } : {}, requestInit, responseData, responseError, - handler, + handler }; } function stream({ @@ -2477,32 +2364,32 @@ function stream({ responseData, responseError = Type5.Never(), description, - handler, + handler }) { return { - type: 'stream', - ...(description ? { description } : {}), + type: "stream", + ...description ? 
{ description } : {}, requestInit, requestData, responseData, responseError, - handler, + handler }; } var Procedure = { rpc, upload, subscription, - stream, + stream }; // router/server.ts -import { Value } from '@sinclair/typebox/value'; +import { Value } from "@sinclair/typebox/value"; // transport/stringifyError.ts function coerceErrorString(err) { if (err instanceof Error) { - return err.message || 'unknown reason'; + return err.message || "unknown reason"; } return `[coerced to error] ${String(err)}`; } @@ -2525,14 +2412,7 @@ var RiverServer = class { streams; services; unregisterTransportListeners; - constructor( - transport, - services2, - handshakeOptions, - extendedContext, - maxCancelledStreamTombstonesPerSession = 200, - middlewares = [], - ) { + constructor(transport, services2, handshakeOptions, extendedContext, maxCancelledStreamTombstonesPerSession = 200, middlewares = []) { const instances = {}; this.middlewares = middlewares; this.services = instances; @@ -2543,7 +2423,7 @@ var RiverServer = class { instances[name] = instance; this.contextMap.set(instance, { ...extendedContext, - state: instance.state, + state: instance.state }); } if (handshakeOptions) { @@ -2552,8 +2432,7 @@ var RiverServer = class { this.transport = transport; this.streams = /* @__PURE__ */ new Map(); this.serverCancelledStreams = /* @__PURE__ */ new Map(); - this.maxCancelledStreamTombstonesPerSession = - maxCancelledStreamTombstonesPerSession; + this.maxCancelledStreamTombstonesPerSession = maxCancelledStreamTombstonesPerSession; this.log = transport.log; const handleCreatingNewStreams = (message) => { if (message.to !== this.transport.clientId) { @@ -2561,8 +2440,8 @@ var RiverServer = class { `got msg with destination that isn't this server, ignoring`, { clientId: this.transport.clientId, - transportMessage: message, - }, + transportMessage: message + } ); return; } @@ -2589,15 +2468,15 @@ var RiverServer = class { newStreamProps.tracingCtx, (span) => { 
this.createNewProcStream(span, newStreamProps); - }, + } ); }; const handleSessionStatus = (evt) => { - if (evt.status !== 'closing') return; + if (evt.status !== "closing") return; const disconnectedClientId = evt.session.to; this.log?.info( `got session disconnect from ${disconnectedClientId}, cleaning up streams`, - evt.session.loggingMetadata, + evt.session.loggingMetadata ); for (const stream2 of this.streams.values()) { if (stream2.from === disconnectedClientId) { @@ -2607,20 +2486,20 @@ var RiverServer = class { this.serverCancelledStreams.delete(disconnectedClientId); }; const handleTransportStatus = (evt) => { - if (evt.status !== 'closed') return; + if (evt.status !== "closed") return; this.unregisterTransportListeners(); }; this.unregisterTransportListeners = () => { - this.transport.removeEventListener('message', handleCreatingNewStreams); - this.transport.removeEventListener('sessionStatus', handleSessionStatus); + this.transport.removeEventListener("message", handleCreatingNewStreams); + this.transport.removeEventListener("sessionStatus", handleSessionStatus); this.transport.removeEventListener( - 'transportStatus', - handleTransportStatus, + "transportStatus", + handleTransportStatus ); }; - this.transport.addEventListener('message', handleCreatingNewStreams); - this.transport.addEventListener('sessionStatus', handleSessionStatus); - this.transport.addEventListener('transportStatus', handleTransportStatus); + this.transport.addEventListener("message", handleCreatingNewStreams); + this.transport.addEventListener("sessionStatus", handleSessionStatus); + this.transport.addEventListener("transportStatus", handleTransportStatus); } createNewProcStream(span, props) { const { @@ -2633,25 +2512,25 @@ var RiverServer = class { serviceContext, initPayload, procClosesWithInit, - passInitAsDataForBackwardsCompat, + passInitAsDataForBackwardsCompat } = props; const { to: from, loggingMetadata, protocolVersion, - id: sessionId, + id: sessionId } = initialSession; 
loggingMetadata.telemetry = { traceId: span.spanContext().traceId, - spanId: span.spanContext().spanId, + spanId: span.spanContext().spanId }; let cleanClose = true; const onMessage = (msg) => { if (msg.from !== from) { - this.log?.error('got stream message from unexpected client', { + this.log?.error("got stream message from unexpected client", { ...loggingMetadata, transportMessage: msg, - tags: ['invariant-violation'], + tags: ["invariant-violation"] }); return; } @@ -2662,15 +2541,15 @@ var RiverServer = class { } else { cancelResult = Err({ code: CANCEL_CODE, - message: 'stream cancelled, client sent invalid payload', + message: "stream cancelled, client sent invalid payload" }); - this.log?.warn('got stream cancel without a valid protocol error', { + this.log?.warn("got stream cancel without a valid protocol error", { ...loggingMetadata, transportMessage: msg, validationErrors: [ - ...Value.Errors(CancelResultSchema, msg.payload), + ...Value.Errors(CancelResultSchema, msg.payload) ], - tags: ['invalid-request'], + tags: ["invalid-request"] }); } if (!reqReadable.isClosed()) { @@ -2681,63 +2560,57 @@ var RiverServer = class { return; } if (reqReadable.isClosed()) { - this.log?.warn('received message after request stream is closed', { + this.log?.warn("received message after request stream is closed", { ...loggingMetadata, transportMessage: msg, - tags: ['invalid-request'], + tags: ["invalid-request"] }); onServerCancel({ code: INVALID_REQUEST_CODE, - message: 'received message after request stream is closed', + message: "received message after request stream is closed" }); return; } - if ( - 'requestData' in procedure && - Value.Check(procedure.requestData, msg.payload) - ) { + if ("requestData" in procedure && Value.Check(procedure.requestData, msg.payload)) { reqReadable._pushValue(Ok(msg.payload)); if (isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion)) { closeReadable(); } return; } - if ( - Value.Check(ControlMessagePayloadSchema, 
msg.payload) && - isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion) - ) { + if (Value.Check(ControlMessagePayloadSchema, msg.payload) && isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion)) { closeReadable(); return; } let validationErrors; let errMessage; - if ('requestData' in procedure) { - errMessage = 'message in requestData position did not match schema'; + if ("requestData" in procedure) { + errMessage = "message in requestData position did not match schema"; validationErrors = castTypeboxValueErrors( - Value.Errors(procedure.requestData, msg.payload), + Value.Errors(procedure.requestData, msg.payload) ); } else { validationErrors = castTypeboxValueErrors( - Value.Errors(ControlMessagePayloadSchema, msg.payload), + Value.Errors(ControlMessagePayloadSchema, msg.payload) ); - errMessage = 'message in control payload position did not match schema'; + errMessage = "message in control payload position did not match schema"; } this.log?.warn(errMessage, { ...loggingMetadata, transportMessage: msg, validationErrors: validationErrors.map((error) => ({ path: error.path, - message: error.message, + message: error.message })), - tags: ['invalid-request'], + tags: ["invalid-request"] }); onServerCancel({ code: INVALID_REQUEST_CODE, message: errMessage, extras: { totalErrors: validationErrors.length, - firstValidationErrors: validationErrors.slice(0, 5), - }, + firstValidationErrors: validationErrors.slice(0, 5) + } }); }; const finishedController = new AbortController(); @@ -2753,18 +2626,18 @@ var RiverServer = class { cleanClose = false; const errPayload = { code: UNEXPECTED_DISCONNECT_CODE, - message: 'client unexpectedly disconnected', + message: "client unexpectedly disconnected" }; if (!reqReadable.isClosed()) { reqReadable._pushValue(Err(errPayload)); closeReadable(); } resWritable.close(); - }, + } }; const sessionScopedSend = this.transport.getSessionBoundSendFn( from, - sessionId, + sessionId ); const cancelStream = (streamId2, 
payload) => { this.cancelStream(from, sessionScopedSend, streamId2, payload); @@ -2787,12 +2660,11 @@ var RiverServer = class { finishedController.abort(); this.streams.delete(streamId); }; - const procClosesWithResponse = - procedure.type === 'rpc' || procedure.type === 'upload'; + const procClosesWithResponse = procedure.type === "rpc" || procedure.type === "upload"; const reqReadable = new ReadableImpl(); const closeReadable = () => { reqReadable._triggerClose(); - if (protocolVersion === 'v1.1') { + if (protocolVersion === "v1.1") { if (!procClosesWithResponse && !resWritable.isClosed()) { resWritable.close(); } @@ -2811,10 +2683,8 @@ var RiverServer = class { } sessionScopedSend({ streamId, - controlFlags: procClosesWithResponse - ? getStreamCloseBackwardsCompat(protocolVersion) - : 0, - payload: response, + controlFlags: procClosesWithResponse ? getStreamCloseBackwardsCompat(protocolVersion) : 0, + payload: response }); if (procClosesWithResponse) { resWritable.close(); @@ -2827,7 +2697,7 @@ var RiverServer = class { message.controlFlags = getStreamCloseBackwardsCompat(protocolVersion); sessionScopedSend(message); } - if (protocolVersion === 'v1.1') { + if (protocolVersion === "v1.1") { if (!reqReadable.isClosed()) { closeReadable(); } @@ -2835,7 +2705,7 @@ var RiverServer = class { if (reqReadable.isClosed()) { cleanup(); } - }, + } }); const onHandlerError = (err, span2) => { const errorMsg = coerceErrorString(err); @@ -2846,18 +2716,18 @@ var RiverServer = class { ...loggingMetadata, transportMessage: { procedureName, - serviceName, + serviceName }, extras: { error: errorMsg, - originalException: err, + originalException: err }, - tags: ['uncaught-handler-error'], - }, + tags: ["uncaught-handler-error"] + } ); onServerCancel({ code: UNCAUGHT_ERROR_CODE, - message: errorMsg, + message: errorMsg }); }; if (procClosesWithInit) { @@ -2872,12 +2742,12 @@ var RiverServer = class { cancel: (message) => { const errRes = { code: CANCEL_CODE, - message: message ?? 
'cancelled by server procedure handler', + message: message ?? "cancelled by server procedure handler" }; onServerCancel(errRes); return Err(errRes); }, - signal: finishedController.signal, + signal: finishedController.signal }; const middlewareContext = { ...serviceContext, @@ -2888,15 +2758,15 @@ var RiverServer = class { signal: finishedController.signal, streamId, procedureName, - serviceName, + serviceName }; const runProcedureHandler = async () => { switch (procedure.type) { - case 'rpc': + case "rpc": try { const responsePayload = await procedure.handler({ ctx: handlerContextWithSpan, - reqInit: initPayload, + reqInit: initPayload }); if (resWritable.isClosed()) { return; @@ -2908,13 +2778,13 @@ var RiverServer = class { span.end(); } break; - case 'stream': + case "stream": try { await procedure.handler({ ctx: handlerContextWithSpan, reqInit: initPayload, reqReadable, - resWritable, + resWritable }); } catch (err) { onHandlerError(err, span); @@ -2922,12 +2792,12 @@ var RiverServer = class { span.end(); } break; - case 'subscription': + case "subscription": try { await procedure.handler({ ctx: handlerContextWithSpan, reqInit: initPayload, - resWritable, + resWritable }); } catch (err) { onHandlerError(err, span); @@ -2935,12 +2805,12 @@ var RiverServer = class { span.end(); } break; - case 'upload': + case "upload": try { const responsePayload = await procedure.handler({ ctx: handlerContextWithSpan, reqInit: initPayload, - reqReadable, + reqReadable }); if (resWritable.isClosed()) { return; @@ -2960,13 +2830,13 @@ var RiverServer = class { middleware({ ctx: middlewareContext, reqInit: initPayload, - next, + next }); }; }, () => { void runProcedureHandler(); - }, + } )(); if (!finishedController.signal.aborted) { this.streams.set(streamId, procStream); @@ -2978,7 +2848,7 @@ var RiverServer = class { const err = `no context found for ${serviceName}`; this.log?.error(err, { clientId: this.transport.clientId, - tags: ['invariant-violation'], + tags: 
["invariant-violation"] }); throw new Error(err); } @@ -2990,32 +2860,32 @@ var RiverServer = class { this.log?.error(`couldn't find session for ${initMessage.from}`, { clientId: this.transport.clientId, transportMessage: initMessage, - tags: ['invariant-violation'], + tags: ["invariant-violation"] }); return null; } const sessionScopedSend = this.transport.getSessionBoundSendFn( initMessage.from, - session.id, + session.id ); const cancelStream = (streamId, payload) => { this.cancelStream(initMessage.from, sessionScopedSend, streamId, payload); }; const sessionMetadata = this.transport.sessionHandshakeMetadata.get( - session.to, + session.to ); if (!sessionMetadata) { const errMessage = `session doesn't have handshake metadata`; this.log?.error(errMessage, { ...session.loggingMetadata, - tags: ['invariant-violation'], + tags: ["invariant-violation"] }); cancelStream( initMessage.streamId, Err({ code: UNCAUGHT_ERROR_CODE, - message: errMessage, - }), + message: errMessage + }) ); return null; } @@ -3025,14 +2895,14 @@ var RiverServer = class { ...session.loggingMetadata, clientId: this.transport.clientId, transportMessage: initMessage, - tags: ['invalid-request'], + tags: ["invalid-request"] }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage, - }), + message: errMessage + }) ); return null; } @@ -3041,14 +2911,14 @@ var RiverServer = class { this.log?.warn(errMessage, { ...session.loggingMetadata, transportMessage: initMessage, - tags: ['invalid-request'], + tags: ["invalid-request"] }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage, - }), + message: errMessage + }) ); return null; } @@ -3057,14 +2927,14 @@ var RiverServer = class { this.log?.warn(errMessage, { ...session.loggingMetadata, transportMessage: initMessage, - tags: ['invalid-request'], + tags: ["invalid-request"] }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage, - }), 
+ message: errMessage + }) ); return null; } @@ -3074,14 +2944,14 @@ var RiverServer = class { ...session.loggingMetadata, clientId: this.transport.clientId, transportMessage: initMessage, - tags: ['invalid-request'], + tags: ["invalid-request"] }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage, - }), + message: errMessage + }) ); return null; } @@ -3091,37 +2961,32 @@ var RiverServer = class { this.log?.warn(errMessage, { ...session.loggingMetadata, transportMessage: initMessage, - tags: ['invalid-request'], + tags: ["invalid-request"] }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage, - }), + message: errMessage + }) ); return null; } const serviceContext = this.getContext(service, initMessage.serviceName); const procedure = service.procedures[initMessage.procedureName]; - if (!['rpc', 'upload', 'stream', 'subscription'].includes(procedure.type)) { + if (!["rpc", "upload", "stream", "subscription"].includes(procedure.type)) { this.log?.error( `got request for invalid procedure type ${procedure.type} at ${initMessage.serviceName}.${initMessage.procedureName}`, { ...session.loggingMetadata, transportMessage: initMessage, - tags: ['invariant-violation'], - }, + tags: ["invariant-violation"] + } ); return null; } let passInitAsDataForBackwardsCompat = false; - if ( - session.protocolVersion === 'v1.1' && - (procedure.type === 'upload' || procedure.type === 'stream') && - Value.Check(procedure.requestData, initMessage.payload) && - Value.Check(procedure.requestInit, {}) - ) { + if (session.protocolVersion === "v1.1" && (procedure.type === "upload" || procedure.type === "stream") && Value.Check(procedure.requestData, initMessage.payload) && Value.Check(procedure.requestInit, {})) { passInitAsDataForBackwardsCompat = true; } else if (!Value.Check(procedure.requestInit, initMessage.payload)) { const errMessage = `procedure init failed validation`; @@ -3129,14 +2994,14 @@ var 
RiverServer = class { ...session.loggingMetadata, clientId: this.transport.clientId, transportMessage: initMessage, - tags: ['invalid-request'], + tags: ["invalid-request"] }); cancelStream( initMessage.streamId, Err({ code: INVALID_REQUEST_CODE, - message: errMessage, - }), + message: errMessage + }) ); return null; } @@ -3152,16 +3017,16 @@ var RiverServer = class { serviceContext, procClosesWithInit: isStreamCloseBackwardsCompat( initMessage.controlFlags, - session.protocolVersion, + session.protocolVersion ), - passInitAsDataForBackwardsCompat, + passInitAsDataForBackwardsCompat }; } cancelStream(to, sessionScopedSend, streamId, payload) { let cancelledStreamsInSession = this.serverCancelledStreams.get(to); if (!cancelledStreamsInSession) { cancelledStreamsInSession = new LRUSet( - this.maxCancelledStreamTombstonesPerSession, + this.maxCancelledStreamTombstonesPerSession ); this.serverCancelledStreams.set(to, cancelledStreamsInSession); } @@ -3200,19 +3065,19 @@ var LRUSet = class { } }; function isStreamCancelBackwardsCompat(controlFlags, protocolVersion) { - if (protocolVersion === 'v1.1') { + if (protocolVersion === "v1.1") { return false; } return isStreamCancel(controlFlags); } function isStreamCloseBackwardsCompat(controlFlags, protocolVersion) { - if (protocolVersion === 'v1.1') { + if (protocolVersion === "v1.1") { return isStreamCancel(controlFlags); } return isStreamClose(controlFlags); } function getStreamCloseBackwardsCompat(protocolVersion) { - if (protocolVersion === 'v1.1') { + if (protocolVersion === "v1.1") { return 4 /* StreamCancelBit */; } return 8 /* StreamClosedBit */; @@ -3224,7 +3089,7 @@ function createServer(transport, services2, providedServerOptions) { providedServerOptions?.handshakeOptions, providedServerOptions?.extendedContext, providedServerOptions?.maxCancelledStreamTombstonesPerSession, - providedServerOptions?.middlewares, + providedServerOptions?.middlewares ); } @@ -3234,30 +3099,22 @@ function 
createServerHandshakeOptions(schema, validate) { } // package.json -var version = '0.212.2'; +var version = "0.212.2"; // tracing/index.ts -function createSessionTelemetryInfo( - tracer, - sessionId, - to, - from, - propagationCtx, -) { - const parentCtx = propagationCtx - ? propagation.extract(context.active(), propagationCtx) - : context.active(); +function createSessionTelemetryInfo(tracer, sessionId, to, from, propagationCtx) { + const parentCtx = propagationCtx ? propagation.extract(context.active(), propagationCtx) : context.active(); const span = tracer.startSpan( `river.session`, { attributes: { - component: 'river', - 'river.session.id': sessionId, - 'river.session.to': to, - 'river.session.from': from, - }, + component: "river", + "river.session.id": sessionId, + "river.session.to": to, + "river.session.from": from + } }, - parentCtx, + parentCtx ); const ctx = trace.setSpan(parentCtx, span); return { span, ctx }; @@ -3267,64 +3124,53 @@ function createConnectionTelemetryInfo(tracer, connection, info) { `river.connection`, { attributes: { - component: 'river', - 'river.connection.id': connection.id, + component: "river", + "river.connection.id": connection.id }, - links: [{ context: info.span.spanContext() }], + links: [{ context: info.span.spanContext() }] }, - info.ctx, + info.ctx ); const ctx = trace.setSpan(info.ctx, span); return { span, ctx }; } -function createHandlerSpan( - tracer, - session, - kind, - serviceName, - procedureName, - streamId, - tracing, - fn, -) { - const ctx = tracing - ? propagation.extract(context.active(), tracing) - : context.active(); +function createHandlerSpan(tracer, session, kind, serviceName, procedureName, streamId, tracing, fn) { + const ctx = tracing ? 
propagation.extract(context.active(), tracing) : context.active(); return tracer.startActiveSpan( `river.server.${serviceName}.${procedureName}`, { attributes: { - component: 'river', - 'river.method.kind': kind, - 'river.method.service': serviceName, - 'river.method.name': procedureName, - 'river.streamId': streamId, - 'span.kind': 'server', + component: "river", + "river.method.kind": kind, + "river.method.service": serviceName, + "river.method.name": procedureName, + "river.streamId": streamId, + "span.kind": "server" }, links: [{ context: session.telemetry.span.spanContext() }], - kind: SpanKind.SERVER, + kind: SpanKind.SERVER }, ctx, - fn, + fn ); } function recordRiverError(span, error) { span.setStatus({ code: SpanStatusCode.ERROR, - message: error.message, + message: error.message }); span.setAttributes({ - 'river.error_code': error.code, - 'river.error_message': error.message, + "river.error_code": error.code, + "river.error_message": error.message }); } function getTracer() { - return trace.getTracer('river', version); + return trace.getTracer("river", version); } // transport/sessionStateMachine/SessionWaitingForHandshake.ts var SessionWaitingForHandshake = class extends CommonSession { - state = 'WaitingForHandshake' /* WaitingForHandshake */; + state = "WaitingForHandshake" /* WaitingForHandshake */; conn; listeners; handshakeTimeout; @@ -3343,7 +3189,7 @@ var SessionWaitingForHandshake = class extends CommonSession { return { clientId: this.from, connId: this.conn.id, - ...this.conn.loggingMetadata, + ...this.conn.loggingMetadata }; } onHandshakeData = (msg) => { @@ -3351,7 +3197,7 @@ var SessionWaitingForHandshake = class extends CommonSession { if (!parsedMsgRes.ok) { this.listeners.onInvalidHandshake( `could not parse handshake message: ${parsedMsgRes.reason}`, - 'MALFORMED_HANDSHAKE', + "MALFORMED_HANDSHAKE" ); return; } @@ -3374,7 +3220,7 @@ var SessionWaitingForHandshake = class extends CommonSession { // 
transport/sessionStateMachine/SessionHandshaking.ts var SessionHandshaking = class extends IdentifiedSessionWithGracePeriod { - state = 'Handshaking' /* Handshaking */; + state = "Handshaking" /* Handshaking */; conn; listeners; handshakeTimeout; @@ -3392,7 +3238,7 @@ var SessionHandshaking = class extends IdentifiedSessionWithGracePeriod { get loggingMetadata() { return { ...super.loggingMetadata, - ...this.conn.loggingMetadata, + ...this.conn.loggingMetadata }; } onHandshakeData = (msg) => { @@ -3400,7 +3246,7 @@ var SessionHandshaking = class extends IdentifiedSessionWithGracePeriod { if (!parsedMsgRes.ok) { this.listeners.onInvalidHandshake( `could not parse handshake message: ${parsedMsgRes.reason}`, - 'MALFORMED_HANDSHAKE', + "MALFORMED_HANDSHAKE" ); return; } @@ -3427,7 +3273,7 @@ var SessionHandshaking = class extends IdentifiedSessionWithGracePeriod { // transport/sessionStateMachine/SessionConnected.ts var SessionConnected = class extends IdentifiedSession { - state = 'Connected' /* Connected */; + state = "Connected" /* Connected */; conn; listeners; heartbeatHandle; @@ -3447,7 +3293,7 @@ var SessionConnected = class extends IdentifiedSession { this.log?.error(msg, { ...this.loggingMetadata, transportMessage: constructedMsg, - tags: ['invariant-violation'], + tags: ["invariant-violation"] }); throw new Error(msg); } @@ -3475,10 +3321,8 @@ var SessionConnected = class extends IdentifiedSession { sendBufferedMessages() { if (this.sendBuffer.length > 0) { this.log?.info( - `sending ${ - this.sendBuffer.length - } buffered messages, starting at seq ${this.nextSeq()}`, - this.loggingMetadata, + `sending ${this.sendBuffer.length} buffered messages, starting at seq ${this.nextSeq()}`, + this.loggingMetadata ); for (const msg of this.sendBuffer) { this.assertSendOrdering(msg); @@ -3495,7 +3339,7 @@ var SessionConnected = class extends IdentifiedSession { get loggingMetadata() { return { ...super.loggingMetadata, - ...this.conn.loggingMetadata, + 
...this.conn.loggingMetadata }; } startMissingHeartbeatTimeout() { @@ -3504,10 +3348,10 @@ var SessionConnected = class extends IdentifiedSession { this.heartbeatMissTimeout = setTimeout(() => { this.log?.info( `closing connection to ${this.to} due to inactivity (missed ${maxMisses} heartbeats which is ${missDuration}ms)`, - this.loggingMetadata, + this.loggingMetadata ); this.telemetry.span.addEvent( - 'closing connection due to missing heartbeat', + "closing connection due to missing heartbeat" ); this.conn.close(); }, missDuration); @@ -3519,13 +3363,13 @@ var SessionConnected = class extends IdentifiedSession { }, this.options.heartbeatIntervalMs); } sendHeartbeat() { - this.log?.debug('sending heartbeat', this.loggingMetadata); + this.log?.debug("sending heartbeat", this.loggingMetadata); const heartbeat = { - streamId: 'heartbeat', + streamId: "heartbeat", controlFlags: 1 /* AckBit */, payload: { - type: 'ACK', - }, + type: "ACK" + } }; this.send(heartbeat); } @@ -3533,7 +3377,7 @@ var SessionConnected = class extends IdentifiedSession { const parsedMsgRes = this.codec.fromBuffer(msg); if (!parsedMsgRes.ok) { this.listeners.onInvalidMessage( - `could not parse message: ${parsedMsgRes.reason}`, + `could not parse message: ${parsedMsgRes.reason}` ); return; } @@ -3544,19 +3388,19 @@ var SessionConnected = class extends IdentifiedSession { `received duplicate msg (got seq: ${parsedMsg.seq}, wanted seq: ${this.ack}), discarding`, { ...this.loggingMetadata, - transportMessage: parsedMsg, - }, + transportMessage: parsedMsg + } ); } else { const reason = `received out-of-order msg, closing connection (got seq: ${parsedMsg.seq}, wanted seq: ${this.ack})`; this.log?.error(reason, { ...this.loggingMetadata, transportMessage: parsedMsg, - tags: ['invariant-violation'], + tags: ["invariant-violation"] }); this.telemetry.span.setStatus({ code: SpanStatusCode.ERROR, - message: reason, + message: reason }); this.conn.close(); } @@ -3564,7 +3408,7 @@ var SessionConnected = 
class extends IdentifiedSession { } this.log?.debug(`received msg`, { ...this.loggingMetadata, - transportMessage: parsedMsg, + transportMessage: parsedMsg }); this.updateBookkeeping(parsedMsg.ack, parsedMsg.seq); if (!isAck(parsedMsg.controlFlags)) { @@ -3573,7 +3417,7 @@ var SessionConnected = class extends IdentifiedSession { } this.log?.debug(`discarding msg (ack bit set)`, { ...this.loggingMetadata, - transportMessage: parsedMsg, + transportMessage: parsedMsg }); if (!this.isActivelyHeartbeating) { this.sendHeartbeat(); @@ -3601,7 +3445,7 @@ var SessionConnected = class extends IdentifiedSession { // transport/sessionStateMachine/SessionBackingOff.ts var SessionBackingOff = class extends IdentifiedSessionWithGracePeriod { - state = 'BackingOff' /* BackingOff */; + state = "BackingOff" /* BackingOff */; listeners; backoffTimeout; constructor(props) { @@ -3624,7 +3468,7 @@ var SessionBackingOff = class extends IdentifiedSessionWithGracePeriod { }; // codec/adapter.ts -import { Value as Value2 } from '@sinclair/typebox/value'; +import { Value as Value2 } from "@sinclair/typebox/value"; var CodecMessageAdapter = class { constructor(codec) { this.codec = codec; @@ -3633,12 +3477,12 @@ var CodecMessageAdapter = class { try { return { ok: true, - value: this.codec.toBuffer(msg), + value: this.codec.toBuffer(msg) }; } catch (e) { return { ok: false, - reason: coerceErrorString(e), + reason: coerceErrorString(e) }; } } @@ -3648,17 +3492,17 @@ var CodecMessageAdapter = class { if (!Value2.Check(OpaqueTransportMessageSchema, parsedMsg)) { return { ok: false, - reason: 'transport message schema mismatch', + reason: "transport message schema mismatch" }; } return { ok: true, - value: parsedMsg, + value: parsedMsg }; } catch (e) { return { ok: false, - reason: coerceErrorString(e), + reason: coerceErrorString(e) }; } } @@ -3679,26 +3523,18 @@ function inheritSharedSession(session) { log: session.log, tracer: session.tracer, protocolVersion: session.protocolVersion, - codec: 
session.codec, + codec: session.codec }; } function inheritSharedSessionWithGrace(session) { return { ...inheritSharedSession(session), - graceExpiryTime: session.graceExpiryTime, + graceExpiryTime: session.graceExpiryTime }; } var SessionStateGraph = { entrypoints: { - NoConnection: ( - to, - from, - listeners, - options, - protocolVersion, - tracer, - log, - ) => { + NoConnection: (to, from, listeners, options, protocolVersion, tracer, log) => { const id = `session-${generateId()}`; const telemetry = createSessionTelemetryInfo(tracer, id, to, from); const sendBuffer = []; @@ -3717,11 +3553,11 @@ var SessionStateGraph = { protocolVersion, tracer, log, - codec: new CodecMessageAdapter(options.codec), + codec: new CodecMessageAdapter(options.codec) }); session.log?.info(`session ${session.id} created in NoConnection state`, { ...session.loggingMetadata, - tags: ['state-transition'], + tags: ["state-transition"] }); return session; }, @@ -3733,14 +3569,14 @@ var SessionStateGraph = { options, tracer, log, - codec: new CodecMessageAdapter(options.codec), + codec: new CodecMessageAdapter(options.codec) }); session.log?.info(`session created in WaitingForHandshake state`, { ...session.loggingMetadata, - tags: ['state-transition'], + tags: ["state-transition"] }); return session; - }, + } }, // All of the transitions 'move'/'consume' the old session and return a new one. // After a session is transitioned, any usage of the old session will throw. 
@@ -3752,14 +3588,14 @@ var SessionStateGraph = { const session = new SessionBackingOff({ backoffMs, listeners, - ...carriedState, + ...carriedState }); session.log?.info( `session ${session.id} transition from NoConnection to BackingOff`, { ...session.loggingMetadata, - tags: ['state-transition'], - }, + tags: ["state-transition"] + } ); return session; }, @@ -3769,14 +3605,14 @@ var SessionStateGraph = { const session = new SessionConnecting({ connPromise, listeners, - ...carriedState, + ...carriedState }); session.log?.info( `session ${session.id} transition from BackingOff to Connecting`, { ...session.loggingMetadata, - tags: ['state-transition'], - }, + tags: ["state-transition"] + } ); return session; }, @@ -3786,19 +3622,19 @@ var SessionStateGraph = { const session = new SessionHandshaking({ conn, listeners, - ...carriedState, + ...carriedState }); conn.telemetry = createConnectionTelemetryInfo( session.tracer, conn, - session.telemetry, + session.telemetry ); session.log?.info( `session ${session.id} transition from Connecting to Handshaking`, { ...session.loggingMetadata, - tags: ['state-transition'], - }, + tags: ["state-transition"] + } ); return session; }, @@ -3809,73 +3645,67 @@ var SessionStateGraph = { const session = new SessionConnected({ conn, listeners, - ...carriedState, + ...carriedState }); session.startMissingHeartbeatTimeout(); session.log?.info( `session ${session.id} transition from Handshaking to Connected`, { ...session.loggingMetadata, - tags: ['state-transition'], - }, + tags: ["state-transition"] + } ); return session; }, - WaitingForHandshakeToConnected: ( - pendingSession, - oldSession, - sessionId, - to, - propagationCtx, - listeners, - protocolVersion, - ) => { + WaitingForHandshakeToConnected: (pendingSession, oldSession, sessionId, to, propagationCtx, listeners, protocolVersion) => { const conn = pendingSession.conn; const { from, options } = pendingSession; - const carriedState = oldSession - ? 
// old session exists, inherit state - inheritSharedSession(oldSession) - : // old session does not exist, create new state - { - id: sessionId, - from, + const carriedState = oldSession ? ( + // old session exists, inherit state + inheritSharedSession(oldSession) + ) : ( + // old session does not exist, create new state + { + id: sessionId, + from, + to, + seq: 0, + ack: 0, + seqSent: 0, + sendBuffer: [], + telemetry: createSessionTelemetryInfo( + pendingSession.tracer, + sessionId, to, - seq: 0, - ack: 0, - seqSent: 0, - sendBuffer: [], - telemetry: createSessionTelemetryInfo( - pendingSession.tracer, - sessionId, - to, - from, - propagationCtx, - ), - options, - tracer: pendingSession.tracer, - log: pendingSession.log, - protocolVersion, - codec: new CodecMessageAdapter(options.codec), - }; + from, + propagationCtx + ), + options, + tracer: pendingSession.tracer, + log: pendingSession.log, + protocolVersion, + codec: new CodecMessageAdapter(options.codec) + } + ); pendingSession._handleStateExit(); oldSession?._handleStateExit(); const session = new SessionConnected({ conn, listeners, - ...carriedState, + ...carriedState }); session.startMissingHeartbeatTimeout(); conn.telemetry = createConnectionTelemetryInfo( session.tracer, conn, - session.telemetry, + session.telemetry ); session.log?.info( `session ${session.id} transition from WaitingForHandshake to Connected`, { ...session.loggingMetadata, - tags: ['state-transition'], - }, + tags: ["state-transition"] + } ); return session; }, @@ -3885,14 +3715,14 @@ var SessionStateGraph = { oldSession._handleStateExit(); const session = new SessionNoConnection({ listeners, - ...carriedState, + ...carriedState }); session.log?.info( `session ${session.id} transition from BackingOff to NoConnection`, { ...session.loggingMetadata, - tags: ['state-transition'], - }, + tags: ["state-transition"] + } ); return session; }, @@ -3902,14 +3732,14 @@ var SessionStateGraph = { oldSession._handleStateExit(); const session = new 
SessionNoConnection({ listeners, - ...carriedState, + ...carriedState }); session.log?.info( `session ${session.id} transition from Connecting to NoConnection`, { ...session.loggingMetadata, - tags: ['state-transition'], - }, + tags: ["state-transition"] + } ); return session; }, @@ -3919,38 +3749,37 @@ var SessionStateGraph = { oldSession._handleStateExit(); const session = new SessionNoConnection({ listeners, - ...carriedState, + ...carriedState }); session.log?.info( `session ${session.id} transition from Handshaking to NoConnection`, { ...session.loggingMetadata, - tags: ['state-transition'], - }, + tags: ["state-transition"] + } ); return session; }, ConnectedToNoConnection: (oldSession, listeners) => { const carriedState = inheritSharedSession(oldSession); - const graceExpiryTime = - Date.now() + oldSession.options.sessionDisconnectGraceMs; + const graceExpiryTime = Date.now() + oldSession.options.sessionDisconnectGraceMs; oldSession.conn.close(); oldSession._handleStateExit(); const session = new SessionNoConnection({ listeners, graceExpiryTime, - ...carriedState, + ...carriedState }); session.log?.info( `session ${session.id} transition from Connected to NoConnection`, { ...session.loggingMetadata, - tags: ['state-transition'], - }, + tags: ["state-transition"] + } ); return session; - }, - }, + } + } }; var transitions = SessionStateGraph.transition; var ClientSessionStateGraph = { @@ -3973,14 +3802,14 @@ var ClientSessionStateGraph = { // Handshaking -> NoConnection: connection closed or handshake timeout HandshakingToNoConnection: transitions.HandshakingToNoConnection, // Connected -> NoConnection: connection closed - ConnectedToNoConnection: transitions.ConnectedToNoConnection, + ConnectedToNoConnection: transitions.ConnectedToNoConnection // destroy/close paths // NoConnection -> x: grace period elapsed // BackingOff -> x: grace period elapsed // Connecting -> x: grace period elapsed // Handshaking -> x: grace period elapsed or invalid handshake 
message or handshake rejection // Connected -> x: grace period elapsed or invalid message - }, + } }; var ServerSessionStateGraph = { entrypoint: SessionStateGraph.entrypoints.WaitingForHandshake, @@ -3990,10 +3819,10 @@ var ServerSessionStateGraph = { WaitingForHandshakeToConnected: transitions.WaitingForHandshakeToConnected, // disconnect paths // Connected -> NoConnection: connection closed - ConnectedToNoConnection: transitions.ConnectedToNoConnection, + ConnectedToNoConnection: transitions.ConnectedToNoConnection // destroy/close paths // WaitingForHandshake -> x: handshake timeout elapsed or invalid handshake message or handshake rejection or connection closed - }, + } }; // transport/transport.ts @@ -4026,12 +3855,12 @@ var Transport = class { this.options = { ...defaultTransportOptions, ...providedOptions }; this.eventDispatcher = new EventDispatcher(); this.clientId = clientId; - this.status = 'open'; + this.status = "open"; this.sessions = /* @__PURE__ */ new Map(); this.tracer = getTracer(); } bindLogger(fn, level) { - if (typeof fn === 'function') { + if (typeof fn === "function") { this.log = createLogProxy(new BaseLogger(fn, level)); return; } @@ -4043,8 +3872,8 @@ var Transport = class { * @param message The received message. */ handleMsg(message) { - if (this.getStatus() !== 'open') return; - this.eventDispatcher.dispatchEvent('message', message); + if (this.getStatus() !== "open") return; + this.eventDispatcher.dispatchEvent("message", message); } /** * Adds a listener to this transport. @@ -4063,7 +3892,7 @@ var Transport = class { this.eventDispatcher.removeEventListener(type, handler); } protocolError(message) { - this.eventDispatcher.dispatchEvent('protocolError', message); + this.eventDispatcher.dispatchEvent("protocolError", message); } /** * Default close implementation for transports. You should override this in the downstream @@ -4071,13 +3900,13 @@ var Transport = class { * Closes the transport. 
Any messages sent while the transport is closed will be silently discarded. */ close() { - this.status = 'closed'; + this.status = "closed"; const sessions = Array.from(this.sessions.values()); for (const session of sessions) { this.deleteSession(session); } - this.eventDispatcher.dispatchEvent('transportStatus', { - status: this.status, + this.eventDispatcher.dispatchEvent("transportStatus", { + status: this.status }); this.eventDispatcher.removeAllListeners(); this.log?.info(`manually closed transport`, { clientId: this.clientId }); @@ -4092,18 +3921,18 @@ var Transport = class { const msg = `attempt to create session for ${session.to} but active session (${activeSession.id}) already exists`; this.log?.error(msg, { ...session.loggingMetadata, - tags: ['invariant-violation'], + tags: ["invariant-violation"] }); throw new Error(msg); } this.sessions.set(session.to, session); - this.eventDispatcher.dispatchEvent('sessionStatus', { - status: 'created', - session, + this.eventDispatcher.dispatchEvent("sessionStatus", { + status: "created", + session }); - this.eventDispatcher.dispatchEvent('sessionTransition', { + this.eventDispatcher.dispatchEvent("sessionTransition", { state: session.state, - id: session.id, + id: session.id }); } updateSession(session) { @@ -4112,7 +3941,7 @@ var Transport = class { const msg = `attempt to transition session for ${session.to} but no active session exists`; this.log?.error(msg, { ...session.loggingMetadata, - tags: ['invariant-violation'], + tags: ["invariant-violation"] }); throw new Error(msg); } @@ -4120,69 +3949,66 @@ var Transport = class { const msg = `attempt to transition active session for ${session.to} but active session (${activeSession.id}) is different from handle (${session.id})`; this.log?.error(msg, { ...session.loggingMetadata, - tags: ['invariant-violation'], + tags: ["invariant-violation"] }); throw new Error(msg); } this.sessions.set(session.to, session); - this.eventDispatcher.dispatchEvent('sessionTransition', 
{ + this.eventDispatcher.dispatchEvent("sessionTransition", { state: session.state, - id: session.id, + id: session.id }); } deleteSession(session, options) { if (session._isConsumed) return; const loggingMetadata = session.loggingMetadata; if (loggingMetadata.tags && options?.unhealthy) { - loggingMetadata.tags.push('unhealthy-session'); + loggingMetadata.tags.push("unhealthy-session"); } session.log?.info(`closing session ${session.id}`, loggingMetadata); - this.eventDispatcher.dispatchEvent('sessionStatus', { - status: 'closing', - session, + this.eventDispatcher.dispatchEvent("sessionStatus", { + status: "closing", + session }); const to = session.to; session.close(); this.sessions.delete(to); - this.eventDispatcher.dispatchEvent('sessionStatus', { - status: 'closed', - session: { id: session.id, to }, + this.eventDispatcher.dispatchEvent("sessionStatus", { + status: "closed", + session: { id: session.id, to } }); } // common listeners onSessionGracePeriodElapsed(session) { this.log?.info( `session to ${session.to} grace period elapsed, closing`, - session.loggingMetadata, + session.loggingMetadata ); this.deleteSession(session); } onConnectingFailed(session) { - const noConnectionSession = - SessionStateGraph.transition.ConnectingToNoConnection(session, { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - }, - }); + const noConnectionSession = SessionStateGraph.transition.ConnectingToNoConnection(session, { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + } + }); this.updateSession(noConnectionSession); return noConnectionSession; } onConnClosed(session) { let noConnectionSession; - if (session.state === 'Handshaking' /* Handshaking */) { - noConnectionSession = - SessionStateGraph.transition.HandshakingToNoConnection(session, { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - }, - }); + if (session.state === 
"Handshaking" /* Handshaking */) { + noConnectionSession = SessionStateGraph.transition.HandshakingToNoConnection(session, { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + } + }); } else { - noConnectionSession = - SessionStateGraph.transition.ConnectedToNoConnection(session, { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - }, - }); + noConnectionSession = SessionStateGraph.transition.ConnectedToNoConnection(session, { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + } + }); } this.updateSession(noConnectionSession); return noConnectionSession; @@ -4196,20 +4022,20 @@ var Transport = class { * onto a session object is not recommended. */ getSessionBoundSendFn(to, sessionId) { - if (this.getStatus() !== 'open') { - throw new Error('cannot get a bound send function on a closed transport'); + if (this.getStatus() !== "open") { + throw new Error("cannot get a bound send function on a closed transport"); } return (msg) => { const session = this.sessions.get(to); if (!session) { throw new Error( - `session scope for ${sessionId} has ended (close), can't send`, + `session scope for ${sessionId} has ended (close), can't send` ); } const sameSession = session.id === sessionId; if (!sameSession || session._isConsumed) { throw new Error( - `session scope for ${sessionId} has ended (transition), can't send`, + `session scope for ${sessionId} has ended (transition), can't send` ); } const res = session.send(msg); @@ -4222,7 +4048,7 @@ var Transport = class { }; // transport/server.ts -import { Value as Value3 } from '@sinclair/typebox/value'; +import { Value as Value3 } from "@sinclair/typebox/value"; var ServerTransport = class extends Transport { /** * The options for this transport. 
@@ -4243,11 +4069,11 @@ var ServerTransport = class extends Transport { this.sessions = /* @__PURE__ */ new Map(); this.options = { ...defaultServerTransportOptions, - ...providedOptions, + ...providedOptions }; this.log?.info(`initiated server transport`, { clientId: this.clientId, - protocolVersion: currentProtocolVersion, + protocolVersion: currentProtocolVersion }); } extendHandshake(options) { @@ -4262,10 +4088,10 @@ var ServerTransport = class extends Transport { super.deleteSession(session, options); } handleConnection(conn) { - if (this.getStatus() !== 'open') return; + if (this.getStatus() !== "open") return; this.log?.info(`new incoming connection`, { ...conn.loggingMetadata, - clientId: this.clientId, + clientId: this.clientId }); let receivedHandshake = false; const pendingSession = ServerSessionStateGraph.entrypoint( @@ -4275,7 +4101,7 @@ var ServerTransport = class extends Transport { onConnectionClosed: () => { this.log?.warn( `connection from unknown closed before handshake finished`, - pendingSession.loggingMetadata, + pendingSession.loggingMetadata ); this.deletePendingSession(pendingSession); }, @@ -4283,14 +4109,14 @@ var ServerTransport = class extends Transport { const errorString = coerceErrorString(err); this.log?.warn( `connection from unknown errored before handshake finished: ${errorString}`, - pendingSession.loggingMetadata, + pendingSession.loggingMetadata ); this.deletePendingSession(pendingSession); }, onHandshakeTimeout: () => { this.log?.warn( `connection from unknown timed out before handshake finished`, - pendingSession.loggingMetadata, + pendingSession.loggingMetadata ); this.deletePendingSession(pendingSession); }, @@ -4301,8 +4127,8 @@ var ServerTransport = class extends Transport { { ...pendingSession.loggingMetadata, connectedTo: msg.from, - transportMessage: msg, - }, + transportMessage: msg + } ); this.deletePendingSession(pendingSession); return; @@ -4313,26 +4139,26 @@ var ServerTransport = class extends Transport { 
onInvalidHandshake: (reason, code) => { this.log?.error( `invalid handshake: ${reason}`, - pendingSession.loggingMetadata, + pendingSession.loggingMetadata ); this.deletePendingSession(pendingSession); this.protocolError({ type: ProtocolError.HandshakeFailed, code, - message: reason, + message: reason }); - }, + } }, this.options, this.tracer, - this.log, + this.log ); this.pendingSessions.add(pendingSession); } rejectHandshakeRequest(session, to, reason, code, metadata) { session.conn.telemetry?.span.setStatus({ code: SpanStatusCode.ERROR, - message: reason, + message: reason }); this.log?.warn(reason, metadata); const responseMsg = handshakeResponseMessage({ @@ -4341,18 +4167,18 @@ var ServerTransport = class extends Transport { status: { ok: false, code, - reason, - }, + reason + } }); const res = session.sendHandshake(responseMsg); if (!res.ok) { this.log?.error(`failed to send handshake response: ${res.reason}`, { ...session.loggingMetadata, - transportMessage: responseMsg, + transportMessage: responseMsg }); this.protocolError({ type: ProtocolError.MessageSendFailure, - message: res.reason, + message: res.reason }); this.deletePendingSession(session); return; @@ -4360,7 +4186,7 @@ var ServerTransport = class extends Transport { this.protocolError({ type: ProtocolError.HandshakeFailed, code, - message: reason, + message: reason }); this.deletePendingSession(session); } @@ -4369,16 +4195,16 @@ var ServerTransport = class extends Transport { this.rejectHandshakeRequest( session, msg.from, - 'received invalid handshake request', - 'MALFORMED_HANDSHAKE', + "received invalid handshake request", + "MALFORMED_HANDSHAKE", { ...session.loggingMetadata, transportMessage: msg, connectedTo: msg.from, validationErrors: [ - ...Value3.Errors(ControlMessageHandshakeRequestSchema, msg.payload), - ], - }, + ...Value3.Errors(ControlMessageHandshakeRequestSchema, msg.payload) + ] + } ); return; } @@ -4388,81 +4214,71 @@ var ServerTransport = class extends Transport { session, 
msg.from, `expected protocol version oneof [${acceptedProtocolVersions.toString()}], got ${gotVersion}`, - 'PROTOCOL_VERSION_MISMATCH', + "PROTOCOL_VERSION_MISMATCH", { ...session.loggingMetadata, connectedTo: msg.from, - transportMessage: msg, - }, + transportMessage: msg + } ); return; } let parsedMetadata = {}; if (this.handshakeExtensions) { - if ( - !Value3.Check(this.handshakeExtensions.schema, msg.payload.metadata) - ) { + if (!Value3.Check(this.handshakeExtensions.schema, msg.payload.metadata)) { this.rejectHandshakeRequest( session, msg.from, - 'received malformed handshake metadata', - 'MALFORMED_HANDSHAKE_META', + "received malformed handshake metadata", + "MALFORMED_HANDSHAKE_META", { ...session.loggingMetadata, connectedTo: msg.from, validationErrors: [ ...Value3.Errors( this.handshakeExtensions.schema, - msg.payload.metadata, - ), - ], - }, + msg.payload.metadata + ) + ] + } ); return; } const previousParsedMetadata = this.sessionHandshakeMetadata.get( - msg.from, + msg.from + ); + const parsedMetadataOrFailureCode = await this.handshakeExtensions.validate( + msg.payload.metadata, + previousParsedMetadata ); - const parsedMetadataOrFailureCode = - await this.handshakeExtensions.validate( - msg.payload.metadata, - previousParsedMetadata, - ); if (session._isConsumed) { return; } - if ( - Value3.Check( - HandshakeErrorCustomHandlerFatalResponseCodes, - parsedMetadataOrFailureCode, - ) - ) { + if (Value3.Check( + HandshakeErrorCustomHandlerFatalResponseCodes, + parsedMetadataOrFailureCode + )) { this.rejectHandshakeRequest( session, msg.from, - 'rejected by handshake handler', + "rejected by handshake handler", parsedMetadataOrFailureCode, { ...session.loggingMetadata, connectedTo: msg.from, - clientId: this.clientId, - }, + clientId: this.clientId + } ); return; } parsedMetadata = parsedMetadataOrFailureCode; } - let connectCase = 'new session'; - const clientNextExpectedSeq = - msg.payload.expectedSessionState.nextExpectedSeq; + let connectCase = "new 
session"; + const clientNextExpectedSeq = msg.payload.expectedSessionState.nextExpectedSeq; const clientNextSentSeq = msg.payload.expectedSessionState.nextSentSeq; let oldSession = this.sessions.get(msg.from); - if ( - this.options.enableTransparentSessionReconnects && - oldSession && - oldSession.id === msg.payload.sessionId - ) { - connectCase = 'transparent reconnection'; + if (this.options.enableTransparentSessionReconnects && oldSession && oldSession.id === msg.payload.sessionId) { + connectCase = "transparent reconnection"; const ourNextSeq = oldSession.nextSeq(); const ourAck = oldSession.ack; if (clientNextSentSeq > ourAck) { @@ -4470,12 +4286,12 @@ var ServerTransport = class extends Transport { session, msg.from, `client is in the future: server wanted next message to be ${ourAck} but client would have sent ${clientNextSentSeq}`, - 'SESSION_STATE_MISMATCH', + "SESSION_STATE_MISMATCH", { ...session.loggingMetadata, connectedTo: msg.from, - transportMessage: msg, - }, + transportMessage: msg + } ); return; } @@ -4484,56 +4300,53 @@ var ServerTransport = class extends Transport { session, msg.from, `server is in the future: client wanted next message to be ${clientNextExpectedSeq} but server would have sent ${ourNextSeq}`, - 'SESSION_STATE_MISMATCH', + "SESSION_STATE_MISMATCH", { ...session.loggingMetadata, connectedTo: msg.from, - transportMessage: msg, - }, + transportMessage: msg + } ); return; } - if (oldSession.state !== 'NoConnection' /* NoConnection */) { - const noConnectionSession = - ServerSessionStateGraph.transition.ConnectedToNoConnection( - oldSession, - { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - }, - }, - ); + if (oldSession.state !== "NoConnection" /* NoConnection */) { + const noConnectionSession = ServerSessionStateGraph.transition.ConnectedToNoConnection( + oldSession, + { + onSessionGracePeriodElapsed: () => { + this.onSessionGracePeriodElapsed(noConnectionSession); + } + } + ); 
oldSession = noConnectionSession; this.updateSession(oldSession); } } else if (oldSession) { - connectCase = 'hard reconnection'; + connectCase = "hard reconnection"; this.log?.info( `client is reconnecting to a new session (${msg.payload.sessionId}) with an old session (${oldSession.id}) already existing, closing old session`, { ...session.loggingMetadata, connectedTo: msg.from, - sessionId: msg.payload.sessionId, - }, + sessionId: msg.payload.sessionId + } ); this.deleteSession(oldSession); oldSession = void 0; } if (!oldSession && (clientNextSentSeq > 0 || clientNextExpectedSeq > 0)) { - connectCase = 'unknown session'; - const rejectionMessage = this.options.enableTransparentSessionReconnects - ? `client is trying to reconnect to a session the server don't know about: ${msg.payload.sessionId}` - : `client is attempting a transparent reconnect to a session but the server does not support it: ${msg.payload.sessionId}`; + connectCase = "unknown session"; + const rejectionMessage = this.options.enableTransparentSessionReconnects ? 
`client is trying to reconnect to a session the server don't know about: ${msg.payload.sessionId}` : `client is attempting a transparent reconnect to a session but the server does not support it: ${msg.payload.sessionId}`; this.rejectHandshakeRequest( session, msg.from, rejectionMessage, - 'SESSION_STATE_MISMATCH', + "SESSION_STATE_MISMATCH", { ...session.loggingMetadata, connectedTo: msg.from, - transportMessage: msg, - }, + transportMessage: msg + } ); return; } @@ -4542,82 +4355,81 @@ var ServerTransport = class extends Transport { `handshake from ${msg.from} ok (${connectCase}), responding with handshake success`, { ...session.loggingMetadata, - connectedTo: msg.from, - }, + connectedTo: msg.from + } ); const responseMsg = handshakeResponseMessage({ from: this.clientId, to: msg.from, status: { ok: true, - sessionId, - }, + sessionId + } }); const res = session.sendHandshake(responseMsg); if (!res.ok) { this.log?.error(`failed to send handshake response: ${res.reason}`, { ...session.loggingMetadata, - transportMessage: responseMsg, + transportMessage: responseMsg }); this.protocolError({ type: ProtocolError.MessageSendFailure, - message: res.reason, + message: res.reason }); this.deletePendingSession(session); return; } this.pendingSessions.delete(session); - const connectedSession = - ServerSessionStateGraph.transition.WaitingForHandshakeToConnected( - session, - // by this point oldSession is either no connection or we dont have an old session - oldSession, - sessionId, - msg.from, - msg.tracing, - { - onConnectionErrored: (err) => { - const errStr = coerceErrorString(err); - this.log?.warn( - `connection to ${connectedSession.to} errored: ${errStr}`, - connectedSession.loggingMetadata, - ); - }, - onConnectionClosed: () => { - this.log?.info( - `connection to ${connectedSession.to} closed`, - connectedSession.loggingMetadata, - ); - this.onConnClosed(connectedSession); - }, - onMessage: (msg2) => { - this.handleMsg(msg2); - }, - onInvalidMessage: (reason) => 
{ - this.log?.error(`invalid message: ${reason}`, { - ...connectedSession.loggingMetadata, - transportMessage: msg, - }); - this.protocolError({ - type: ProtocolError.InvalidMessage, - message: reason, - }); - this.deleteSession(connectedSession, { unhealthy: true }); - }, - onMessageSendFailure: (msg2, reason) => { - this.log?.error(`failed to send message: ${reason}`, { - ...connectedSession.loggingMetadata, - transportMessage: msg2, - }); - this.protocolError({ - type: ProtocolError.MessageSendFailure, - message: reason, - }); - this.deleteSession(connectedSession, { unhealthy: true }); - }, + const connectedSession = ServerSessionStateGraph.transition.WaitingForHandshakeToConnected( + session, + // by this point oldSession is either no connection or we dont have an old session + oldSession, + sessionId, + msg.from, + msg.tracing, + { + onConnectionErrored: (err) => { + const errStr = coerceErrorString(err); + this.log?.warn( + `connection to ${connectedSession.to} errored: ${errStr}`, + connectedSession.loggingMetadata + ); }, - gotVersion, - ); + onConnectionClosed: () => { + this.log?.info( + `connection to ${connectedSession.to} closed`, + connectedSession.loggingMetadata + ); + this.onConnClosed(connectedSession); + }, + onMessage: (msg2) => { + this.handleMsg(msg2); + }, + onInvalidMessage: (reason) => { + this.log?.error(`invalid message: ${reason}`, { + ...connectedSession.loggingMetadata, + transportMessage: msg + }); + this.protocolError({ + type: ProtocolError.InvalidMessage, + message: reason + }); + this.deleteSession(connectedSession, { unhealthy: true }); + }, + onMessageSendFailure: (msg2, reason) => { + this.log?.error(`failed to send message: ${reason}`, { + ...connectedSession.loggingMetadata, + transportMessage: msg2 + }); + this.protocolError({ + type: ProtocolError.MessageSendFailure, + message: reason + }); + this.deleteSession(connectedSession, { unhealthy: true }); + } + }, + gotVersion + ); const bufferSendRes = 
connectedSession.sendBufferedMessages(); if (!bufferSendRes.ok) { return; @@ -4636,7 +4448,7 @@ var ServerTransport = class extends Transport { function cleanHeaders(headers) { const cleanedHeaders = {}; for (const [key, value] of Object.entries(headers)) { - if (!key.startsWith('sec-') && value) { + if (!key.startsWith("sec-") && value) { const cleanedValue = Array.isArray(value) ? value[0] : value; cleanedHeaders[key] = cleanedValue; } @@ -4648,22 +4460,22 @@ var WebSocketServerTransport = class extends ServerTransport { constructor(wss, clientId, providedOptions) { super(clientId, providedOptions); this.wss = wss; - this.wss.on('connection', this.connectionHandler); + this.wss.on("connection", this.connectionHandler); } connectionHandler = (ws, req) => { const conn = new WebSocketConnection(ws, { - headers: cleanHeaders(req.headersDistinct), + headers: cleanHeaders(req.headersDistinct) }); this.handleConnection(conn); }; close() { super.close(); - this.wss.off('connection', this.connectionHandler); + this.wss.off("connection", this.connectionHandler); } }; // python-client/tests/test_server_handshake.ts -import { Type as Type6 } from '@sinclair/typebox'; +import { Type as Type6 } from "@sinclair/typebox"; var ServiceSchema = createServiceSchema(); var HandshakeTestServiceSchema = ServiceSchema.define({ echo: Procedure.rpc({ @@ -4672,44 +4484,47 @@ var HandshakeTestServiceSchema = ServiceSchema.define({ responseError: Type6.Never(), async handler({ reqInit }) { return Ok({ response: reqInit.msg }); - }, - }), + } + }) }); var services = { - test: HandshakeTestServiceSchema, + test: HandshakeTestServiceSchema }; var handshakeSchema = Type6.Object({ token: Type6.String() }); async function main() { const httpServer = http.createServer(); const port = await new Promise((resolve, reject) => { - httpServer.listen(0, '127.0.0.1', () => { + httpServer.listen(0, "127.0.0.1", () => { const addr = httpServer.address(); - if (typeof addr === 'object' && addr) 
resolve(addr.port); + if (typeof addr === "object" && addr) resolve(addr.port); else reject(new Error("couldn't get port")); }); }); const wss = new WebSocketServer({ server: httpServer }); - const serverTransport = new WebSocketServerTransport(wss, 'HANDSHAKE_SERVER'); + const serverTransport = new WebSocketServerTransport( + wss, + "HANDSHAKE_SERVER" + ); const _server = createServer(serverTransport, services, { handshakeOptions: createServerHandshakeOptions( handshakeSchema, (metadata) => { - if (metadata.token !== 'valid-token') { - return 'REJECTED_BY_CUSTOM_HANDLER'; + if (metadata.token !== "valid-token") { + return "REJECTED_BY_CUSTOM_HANDLER"; } return {}; - }, - ), + } + ) }); process.stdout.write(`RIVER_PORT=${port} `); - process.on('SIGTERM', () => { + process.on("SIGTERM", () => { void _server.close().then(() => { httpServer.close(); process.exit(0); }); }); - process.on('SIGINT', () => { + process.on("SIGINT", () => { void _server.close().then(() => { httpServer.close(); process.exit(0); @@ -4717,6 +4532,6 @@ async function main() { }); } main().catch((err) => { - console.error('Failed to start handshake test server:', err); + console.error("Failed to start handshake test server:", err); process.exit(1); }); diff --git a/python-client/tests/test_session.py b/python-client/tests/test_session.py index f006ffda..24a1ac03 100644 --- a/python-client/tests/test_session.py +++ b/python-client/tests/test_session.py @@ -225,3 +225,214 @@ async def test_budget_restores_after_success(self, server_url: str): assert budget.budget_consumed <= 1 # mostly restored finally: await cleanup(client) + + +# ===================================================================== +# Grace Period Expiry During Active Procedures +# ===================================================================== + + +class TestGracePeriodActiveProcedures: + """Grace period expiry while a procedure is in-flight should + produce UNEXPECTED_DISCONNECT — mirroring disconnects.test.ts.""" + + 
@pytest.mark.asyncio + async def test_rpc_gets_disconnect_on_grace_expiry(self, server_url: str): + """RPC buffered during disconnect gets UNEXPECTED_DISCONNECT after grace.""" + client = await make_client(server_url) + try: + # Establish connection + result = await client.rpc("test", "add", {"n": 1}) + assert result["ok"] is True + + # Kill connection, disable reconnect + client.transport.reconnect_on_connection_drop = False + session = client.transport.sessions.get("SERVER") + assert session is not None + await session._ws.close() + await asyncio.sleep(0.1) + + # Start an RPC while disconnected — message gets buffered + rpc_task = asyncio.create_task(client.rpc("test", "add", {"n": 2})) + + # Wait for grace period (300ms) to expire + await asyncio.sleep(0.5) + + result = await asyncio.wait_for(rpc_task, timeout=2.0) + assert result["ok"] is False + assert result["payload"]["code"] == "UNEXPECTED_DISCONNECT" + finally: + await cleanup(client) + + @pytest.mark.asyncio + async def test_stream_gets_disconnect_on_grace_expiry(self, server_url: str): + """Active stream gets UNEXPECTED_DISCONNECT after grace period.""" + client = await make_client(server_url) + try: + stream = client.stream("test", "echo", {}) + stream.req_writable.write({"msg": "hello", "ignore": False}) + done, msg = await stream.res_readable.next() + assert not done + assert msg["ok"] is True + + # Kill connection, disable reconnect + client.transport.reconnect_on_connection_drop = False + session = client.transport.sessions.get("SERVER") + assert session is not None + await session._ws.close() + + # Wait for grace period to expire + await asyncio.sleep(0.5) + + done, msg = await stream.res_readable.next() + assert not done + assert msg["ok"] is False + assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT" + assert not stream.req_writable.is_writable() + finally: + await cleanup(client) + + @pytest.mark.asyncio + async def test_upload_gets_disconnect_on_grace_expiry(self, server_url: str): + 
"""Upload in-flight gets UNEXPECTED_DISCONNECT after grace period.""" + client = await make_client(server_url) + try: + upload = client.upload("uploadable", "addMultiple", {}) + upload.req_writable.write({"n": 1}) + + # Ensure connection established + await asyncio.sleep(0.1) + + # Kill connection, disable reconnect + client.transport.reconnect_on_connection_drop = False + session = client.transport.sessions.get("SERVER") + assert session is not None + await session._ws.close() + + # Wait for grace period to expire + await asyncio.sleep(0.5) + + result = await asyncio.wait_for(upload.finalize(), timeout=2.0) + assert result["ok"] is False + assert result["payload"]["code"] == "UNEXPECTED_DISCONNECT" + finally: + await cleanup(client) + + @pytest.mark.asyncio + async def test_subscription_gets_disconnect_on_grace_expiry(self, server_url: str): + """Subscription gets UNEXPECTED_DISCONNECT after grace period.""" + client = await make_client(server_url) + try: + sub = client.subscribe("subscribable", "value", {}) + done, msg = await sub.res_readable.next() + assert not done + assert msg["ok"] is True + + # Kill connection, disable reconnect + client.transport.reconnect_on_connection_drop = False + session = client.transport.sessions.get("SERVER") + assert session is not None + await session._ws.close() + + # Wait for grace period to expire + await asyncio.sleep(0.5) + + done, msg = await sub.res_readable.next() + assert not done + assert msg["ok"] is False + assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT" + finally: + await cleanup(client) + + +# ===================================================================== +# Reconnect After Grace Expiry +# ===================================================================== + + +class TestReconnectAfterGrace: + @pytest.mark.asyncio + async def test_rpc_after_grace_expiry_creates_new_session(self, server_url: str): + """After grace period expires, a new RPC creates a fresh session.""" + client = await 
make_client(server_url) + try: + result = await client.rpc("test", "add", {"n": 1}) + assert result["ok"] is True + + old_session = client.transport.sessions.get("SERVER") + assert old_session is not None + old_id = old_session.id + + # Kill connection, disable reconnect, wait for grace expiry + client.transport.reconnect_on_connection_drop = False + await old_session._ws.close() + await asyncio.sleep(0.5) + + # Session should be gone + assert client.transport.sessions.get("SERVER") is None + + # Re-enable reconnect and make a new RPC + client.transport.reconnect_on_connection_drop = True + result = await client.rpc("test", "add", {"n": 2}) + assert result["ok"] is True + + # Should have a new session with a different ID + new_session = client.transport.sessions.get("SERVER") + assert new_session is not None + assert new_session.id != old_id + finally: + await cleanup(client) + + @pytest.mark.asyncio + async def test_connect_on_invoke_false_no_reconnect(self, server_url: str): + """With connect_on_invoke=False, no reconnect after grace expiry. + + The RPC buffers the message but never connects, so it hangs. + We verify this by checking the transport state rather than + waiting for an RPC result. 
+ """ + transport = WebSocketClientTransport( + ws_url=server_url, + client_id=None, + server_id="SERVER", + codec=NaiveJsonCodec(), + options=SHORT_OPTIONS, + ) + client = RiverClient( + transport, + server_id="SERVER", + connect_on_invoke=False, + eagerly_connect=True, + ) + try: + # Wait for eager connection + await asyncio.sleep(0.3) + session = transport.sessions.get("SERVER") + assert session is not None + + # Make an RPC to verify connection works + result = await client.rpc("test", "add", {"n": 1}) + assert result["ok"] is True + + # Kill connection, disable reconnect, wait for grace expiry + transport.reconnect_on_connection_drop = False + await session._ws.close() + await asyncio.sleep(0.5) + + # Session should be gone after grace expiry + assert transport.sessions.get("SERVER") is None + + # Re-enable reconnect on drop but connect_on_invoke is still false + transport.reconnect_on_connection_drop = True + + # With connect_on_invoke=False, the transport won't connect. + # Closing the transport now should produce UNEXPECTED_DISCONNECT + # for any pending procedures. 
+ await transport.close() + + result = await client.rpc("test", "add", {"n": 2}) + assert result["ok"] is False + assert result["payload"]["code"] == "UNEXPECTED_DISCONNECT" + finally: + # transport already closed above + pass From 4b5a091ebf7e384e832b696137e566808a681dcc Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 18:23:04 -0800 Subject: [PATCH 11/29] fix --- python-client/pyproject.toml | 1 - python-client/river/client.py | 2 +- python-client/river/codec.py | 31 +++-- python-client/river/codegen/emitter.py | 19 ++- python-client/river/session.py | 5 +- python-client/river/streams.py | 5 +- python-client/tests/test_e2e.py | 148 ++++++++++++++++++------ python-client/tests/test_equivalence.py | 24 ++-- python-client/tests/test_handshake.py | 39 ++----- python-client/tests/test_session.py | 148 ++++++++---------------- python-client/tests/test_utils.py | 120 +++++++++++++++++++ 11 files changed, 349 insertions(+), 193 deletions(-) create mode 100644 python-client/tests/test_utils.py diff --git a/python-client/pyproject.toml b/python-client/pyproject.toml index 17955ec4..8d6dc10e 100644 --- a/python-client/pyproject.toml +++ b/python-client/pyproject.toml @@ -6,7 +6,6 @@ build-backend = "setuptools.build_meta" name = "river-client" version = "0.1.0" description = "Python client for River protocol v2.0" -readme = "README.md" requires-python = ">=3.10" license = {text = "MIT"} dependencies = [ diff --git a/python-client/river/client.py b/python-client/river/client.py index 80f45996..a2557651 100644 --- a/python-client/river/client.py +++ b/python-client/river/client.py @@ -324,7 +324,7 @@ def write_cb(raw_value: Any) -> None: pass close_readable() if req_writable.is_writable(): - req_writable._closed = True + req_writable.close() def close_cb() -> None: nonlocal clean_close diff --git a/python-client/river/codec.py b/python-client/river/codec.py index 8bbabd84..826cc7b3 100644 --- a/python-client/river/codec.py +++ b/python-client/river/codec.py @@ 
-24,13 +24,25 @@ def from_buffer(self, buf: bytes) -> dict[str, Any]: ... -class _CustomEncoder(json.JSONEncoder): - """JSON encoder with support for bytes and large ints.""" - - def default(self, o: Any) -> Any: - if isinstance(o, (bytes, bytearray)): - return {"$t": base64.b64encode(o).decode("ascii")} - return super().default(o) +_MAX_SAFE_INTEGER = 2**53 - 1 +_MIN_SAFE_INTEGER = -(2**53 - 1) + + +def _prepare_for_json(obj: Any) -> Any: + """Recursively replace bytes and large ints with wire markers.""" + if isinstance(obj, (bytes, bytearray)): + return {"$t": base64.b64encode(obj).decode("ascii")} + if isinstance(obj, bool): + return obj + if isinstance(obj, int): + if obj > _MAX_SAFE_INTEGER or obj < _MIN_SAFE_INTEGER: + return {"$b": str(obj)} + return obj + if isinstance(obj, dict): + return {k: _prepare_for_json(v) for k, v in obj.items()} + if isinstance(obj, (list, tuple)): + return [_prepare_for_json(v) for v in obj] + return obj def _custom_object_hook(obj: dict) -> Any: @@ -48,9 +60,8 @@ class NaiveJsonCodec(Codec): name = "naive" def to_buffer(self, obj: dict[str, Any]) -> bytes: - return json.dumps(obj, cls=_CustomEncoder, separators=(",", ":")).encode( - "utf-8" - ) + prepared = _prepare_for_json(obj) + return json.dumps(prepared, separators=(",", ":")).encode("utf-8") def from_buffer(self, buf: bytes) -> dict[str, Any]: return json.loads(buf.decode("utf-8"), object_hook=_custom_object_hook) diff --git a/python-client/river/codegen/emitter.py b/python-client/river/codegen/emitter.py index 8213c338..1bdf23c8 100644 --- a/python-client/river/codegen/emitter.py +++ b/python-client/river/codegen/emitter.py @@ -11,7 +11,12 @@ import jinja2 -from river.codegen.schema import SchemaIR, ServiceDef, _to_pascal_case +from river.codegen.schema import ( + SchemaIR, + ServiceDef, + _sanitize_identifier, + _to_pascal_case, +) _TEMPLATE_DIR = Path(__file__).parent / "templates" @@ -123,13 +128,19 @@ def render_service_client(svc: ServiceDef, ir: SchemaIR, 
import_prefix: str) -> ) +def _module_name(service_name: str) -> str: + """Sanitize a service name for use as a Python module name.""" + return _sanitize_identifier(service_name) + + def render_init(ir: SchemaIR, import_prefix: str) -> str: imports = [] for svc in ir.services: + mod_name = _module_name(svc.name) if import_prefix == ".": - mod = f".{svc.name}_client" + mod = f".{mod_name}_client" else: - mod = f"{import_prefix}{svc.name}_client" + mod = f"{import_prefix}{mod_name}_client" imports.append((mod, f"{svc.class_name}Client")) imports.sort(key=lambda x: x[0]) @@ -165,7 +176,7 @@ def _write(name: str, content: str) -> None: for svc in ir.services: _write( - f"{svc.name}_client.py", + f"{_module_name(svc.name)}_client.py", render_service_client(svc, ir, import_prefix), ) diff --git a/python-client/river/session.py b/python-client/river/session.py index 996cd3ad..7287a6b7 100644 --- a/python-client/river/session.py +++ b/python-client/river/session.py @@ -141,6 +141,9 @@ def send(self, partial: PartialTransportMessage) -> tuple[bool, str]: if self.state == SessionState.CONNECTED and self._ws is not None: ok, result = self._send_over_wire(msg) if not ok: + # Remove the unsendable message from the buffer so it + # doesn't poison retransmit state. 
+ self.send_buffer = [m for m in self.send_buffer if m.id != msg.id] return False, result return True, msg.id @@ -339,6 +342,6 @@ def create_handshake_request(self, metadata: Any = None) -> TransportMessage: seq=0, ack=0, payload=payload, - stream_id="handshake", + stream_id=generate_id(), control_flags=0, ) diff --git a/python-client/river/streams.py b/python-client/river/streams.py index b1dc29e3..375c47f9 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -210,11 +210,14 @@ def close(self, value: T | None = None) -> None: """Close the stream, optionally writing a final value.""" if self._closed: return # Idempotent - self._closed = True if value is not None: self._write_cb(value) + self._closed = True + # Nullify callbacks after invocation to prevent reuse (matches TS) + self._write_cb = lambda _: None # type: ignore[assignment] if self._close_cb: self._close_cb() + self._close_cb = None def is_writable(self) -> bool: return not self._closed diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index 917b7b9d..81272a7d 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -13,6 +13,7 @@ from river.client import RiverClient from river.codec import NaiveJsonCodec from river.transport import WebSocketClientTransport +from tests.test_utils import wait_for_connected # -- helpers -- @@ -390,7 +391,7 @@ async def test_cancel_rpc(self, server_url: str): abort_evt = asyncio.Event() async def do_abort(): - await asyncio.sleep(0.2) + await wait_for_connected(client.transport) abort_evt.set() asyncio.ensure_future(do_abort()) @@ -411,10 +412,8 @@ async def test_cancel_stream(self, server_url: str): stream = client.stream( "cancel", "blockingStream", {}, abort_signal=abort_evt ) - # Give server time to receive and process the init message - await asyncio.sleep(0.2) + await wait_for_connected(client.transport) abort_evt.set() - await asyncio.sleep(0) results = await 
stream.res_readable.collect() assert len(results) == 1 @@ -433,8 +432,7 @@ async def test_cancel_upload(self, server_url: str): upload = client.upload( "cancel", "blockingUpload", {}, abort_signal=abort_evt ) - # Give server time to receive - await asyncio.sleep(0.2) + await wait_for_connected(client.transport) abort_evt.set() result = await upload.finalize() @@ -453,10 +451,8 @@ async def test_cancel_subscription(self, server_url: str): sub = client.subscribe( "cancel", "blockingSubscription", {}, abort_signal=abort_evt ) - # Give server time to receive - await asyncio.sleep(0.2) + await wait_for_connected(client.transport) abort_evt.set() - await asyncio.sleep(0) done, msg = await sub.res_readable.next() assert not done @@ -489,12 +485,8 @@ async def test_stream_idempotent_close(self, server_url: str): assert msg["ok"] is True assert msg["payload"]["response"] == "abc" - # Wait for server close to be received - await asyncio.sleep(0.1) - # Abort after stream completed - should be a no-op abort_evt.set() - await asyncio.sleep(0.05) # Drain any remaining messages - should be done or at most a cancel done, val = await stream.res_readable.next() @@ -522,7 +514,6 @@ async def test_subscription_idempotent_close(self, server_url: str): # Abort abort_evt.set() - await asyncio.sleep(0.05) # Read the cancel done, msg = await sub.res_readable.next() @@ -549,13 +540,11 @@ async def test_cancellation_after_transport_close(self, server_url: str): # Close the transport await client.transport.close() - await asyncio.sleep(0.05) # Closing writable after transport close should be safe stream.req_writable.close() # Aborting after transport close should be safe abort_evt.set() - await asyncio.sleep(0.05) # No crash = success finally: # Transport already closed @@ -578,9 +567,7 @@ async def test_eagerly_connect(self, server_url: str): ) client = RiverClient(transport, server_id="SERVER", eagerly_connect=True) try: - # Wait for the connection to be established - await 
asyncio.sleep(0.5) - # Should have a session now + await wait_for_connected(transport) assert len(transport.sessions) > 0 # Verify the connection works by making a call result = await client.rpc("test", "add", {"n": 1}) @@ -598,8 +585,6 @@ class TestTransparentReconnect: @pytest.mark.asyncio async def test_reconnect_with_concurrent_streams(self, server_url: str): """Multiple concurrent streams survive a connection drop and reconnect.""" - from river.session import SessionState - transport = WebSocketClientTransport( ws_url=server_url, server_id="SERVER", @@ -632,15 +617,7 @@ async def test_reconnect_with_concurrent_streams(self, server_url: str): await session._ws.close() # Wait for reconnection - reconnected = asyncio.Event() - - def on_transition(evt): - if evt.get("state") == SessionState.CONNECTED: - reconnected.set() - - transport.add_event_listener("sessionTransition", on_transition) - await asyncio.wait_for(reconnected.wait(), timeout=5.0) - transport.remove_event_listener("sessionTransition", on_transition) + await wait_for_connected(transport) # Send more messages on all three streams after reconnect stream_a.req_writable.write({"msg": "2", "ignore": False}) @@ -1331,9 +1308,6 @@ async def test_cancel_cleans_up_listeners(self, server_url: str): abort.set() await stream.res_readable.next() # consume CANCEL error - # Give the event loop a tick to run cleanup - await asyncio.sleep(0.05) - assert transport._events.listener_count("message") == before_msg assert transport._events.listener_count("sessionStatus") == before_ss finally: @@ -1358,7 +1332,6 @@ async def test_repeated_cancels_do_not_leak(self, server_url: str): ) abort.set() await stream.res_readable.next() - await asyncio.sleep(0.01) assert transport._events.listener_count("message") == before finally: @@ -1387,11 +1360,8 @@ async def test_abort_task_cancelled_on_normal_close(self, server_url: str): done2, _ = await stream.res_readable.next() assert done2 - await asyncio.sleep(0.05) - # Setting the 
signal now should be harmless (no stale cancel) abort.set() - await asyncio.sleep(0.05) finally: await transport.close() @@ -1427,3 +1397,107 @@ async def test_finalize_after_explicit_close(self, server_url: str): assert result["payload"]["result"] == 2 finally: await cleanup_client(client) + + +# ===================================================================== +# Protocol conformance tests +# ===================================================================== + + +class TestProtocolConformance: + """Tests verifying Python client matches TS protocol behavior.""" + + def test_handshake_stream_id_is_random(self): + """Handshake streamId should be a random ID, not a fixed string. + + TS uses generateId() for handshake streamId; Python must match. + """ + from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.session import Session + + codec = CodecMessageAdapter(NaiveJsonCodec()) + s1 = Session("sess1", "client", "server", codec) + s2 = Session("sess2", "client", "server", codec) + + hs1 = s1.create_handshake_request() + hs2 = s2.create_handshake_request() + + # streamId should NOT be the fixed string "handshake" + assert hs1.stream_id != "handshake" + # streamId should be random (different between sessions) + assert hs1.stream_id != hs2.stream_id + # Should have a reasonable length (like generate_id output) + assert len(hs1.stream_id) > 8 + + def test_readable_push_after_break_is_noop(self): + """push_value after break_() should not buffer (memory leak fix).""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"val": 1}) + r.break_() + # After break, queue should be cleared + assert not r._has_values_in_queue() + # Pushing more values should be silently discarded + r._push_value({"val": 2}) + r._push_value({"val": 3}) + assert not r._has_values_in_queue() + + def test_writable_close_nullifies_callbacks(self): + """After close(), write/close callbacks should not be invocable. 
+ + TS nullifies callbacks after close to prevent reuse. + """ + from river.streams import Writable + + write_count = [0] + close_count = [0] + w: Writable = Writable( + write_cb=lambda x: write_count.__setitem__(0, write_count[0] + 1), + close_cb=lambda: close_count.__setitem__(0, close_count[0] + 1), + ) + w.close() + assert close_count[0] == 1 + + # After close, the callbacks should not fire again even if we + # bypass the _closed check (internal invariant) + w._closed = False + w.close() + # Should still be 1 since callbacks were nullified + assert close_count[0] == 1 + + def test_heartbeat_stream_id_is_fixed(self): + """Heartbeat streamId should be 'heartbeat' (matching TS).""" + from river.types import heartbeat_message + + hb = heartbeat_message() + assert hb.stream_id == "heartbeat" + + def test_handshake_payload_matches_ts_schema(self): + """Handshake request payload has all required fields.""" + from river.types import PROTOCOL_VERSION, handshake_request_payload + + payload = handshake_request_payload( + session_id="test-session", + next_expected_seq=0, + next_sent_seq=0, + metadata={"token": "abc"}, + ) + assert payload["type"] == "HANDSHAKE_REQ" + assert payload["protocolVersion"] == PROTOCOL_VERSION + assert payload["sessionId"] == "test-session" + assert payload["expectedSessionState"]["nextExpectedSeq"] == 0 + assert payload["expectedSessionState"]["nextSentSeq"] == 0 + assert payload["metadata"] == {"token": "abc"} + + def test_handshake_payload_omits_metadata_when_none(self): + """Handshake without metadata should not include metadata field.""" + from river.types import handshake_request_payload + + payload = handshake_request_payload( + session_id="test-session", + next_expected_seq=0, + next_sent_seq=0, + metadata=None, + ) + assert "metadata" not in payload diff --git a/python-client/tests/test_equivalence.py b/python-client/tests/test_equivalence.py index 7eb43897..1392c235 100644 --- a/python-client/tests/test_equivalence.py +++ 
b/python-client/tests/test_equivalence.py @@ -15,6 +15,7 @@ from river.codec import Codec from river.session import SessionOptions from river.transport import WebSocketClientTransport +from tests.test_utils import wait_for_connected, wait_for_session_gone # -- helpers -- @@ -400,7 +401,6 @@ async def test_subscription_abort(self, codec_and_url: tuple[Codec, str]): assert msg["ok"] is True abort_evt.set() - await asyncio.sleep(0.05) done, msg = await sub.res_readable.next() assert not done @@ -438,7 +438,8 @@ async def test_cancel_rpc(self, codec_and_url: tuple[Codec, str]): abort_evt = asyncio.Event() async def trigger(): - await asyncio.sleep(0.2) + # Wait for connection, then cancel + await wait_for_connected(client.transport) abort_evt.set() asyncio.ensure_future(trigger()) @@ -459,9 +460,8 @@ async def test_cancel_stream(self, codec_and_url: tuple[Codec, str]): stream = client.stream( "cancel", "blockingStream", {}, abort_signal=abort_evt ) - await asyncio.sleep(0.2) + await wait_for_connected(client.transport) abort_evt.set() - await asyncio.sleep(0) results = await stream.res_readable.collect() assert len(results) == 1 @@ -480,7 +480,7 @@ async def test_cancel_upload(self, codec_and_url: tuple[Codec, str]): upload = client.upload( "cancel", "blockingUpload", {}, abort_signal=abort_evt ) - await asyncio.sleep(0.2) + await wait_for_connected(client.transport) abort_evt.set() result = await upload.finalize() @@ -497,11 +497,13 @@ async def test_cancel_subscription(self, codec_and_url: tuple[Codec, str]): try: abort_evt = asyncio.Event() sub = client.subscribe( - "cancel", "blockingSubscription", {}, abort_signal=abort_evt + "cancel", + "blockingSubscription", + {}, + abort_signal=abort_evt, ) - await asyncio.sleep(0.2) + await wait_for_connected(client.transport) abort_evt.set() - await asyncio.sleep(0) done, msg = await sub.res_readable.next() assert not done @@ -525,8 +527,8 @@ async def test_cancel_after_completion_is_noop( assert result["ok"] is True assert 
result["payload"]["done"] is True + # Cancel after completion — should be safe no-op abort_evt.set() - await asyncio.sleep(0.05) finally: await cleanup(client) @@ -542,7 +544,6 @@ async def test_cancel_after_transport_close_is_safe( await client.transport.close() abort_evt.set() - await asyncio.sleep(0.05) # ===================================================================== @@ -602,8 +603,7 @@ async def test_mid_stream_disconnect(self, codec_and_url: tuple[Codec, str]): if session._ws is not None: await session._ws.close() - # Wait for short grace period to expire - await asyncio.sleep(0.4) + await wait_for_session_gone(client.transport) # Session destroyed → stream gets UNEXPECTED_DISCONNECT done, msg = await stream.res_readable.next() diff --git a/python-client/tests/test_handshake.py b/python-client/tests/test_handshake.py index 549d9ee8..978afe53 100644 --- a/python-client/tests/test_handshake.py +++ b/python-client/tests/test_handshake.py @@ -6,13 +6,12 @@ from __future__ import annotations -import asyncio - import pytest from river.client import RiverClient from river.codec import NaiveJsonCodec from river.transport import WebSocketClientTransport +from tests.test_utils import wait_for_connected, wait_for_event async def make_handshake_client( @@ -65,22 +64,13 @@ async def test_handshake_with_invalid_metadata_emits_error( codec=NaiveJsonCodec(), handshake_metadata={"token": "wrong-token"}, ) - - error_event = asyncio.Event() - errors: list[dict] = [] - - def on_error(e: dict) -> None: - errors.append(e) - error_event.set() - - transport.add_event_listener("protocolError", on_error) - try: transport.connect("HANDSHAKE_SERVER") - # Wait for the handshake failure event - await asyncio.wait_for(error_event.wait(), timeout=5.0) - assert len(errors) > 0 - assert errors[0]["type"] in ("handshake_failed", "conn_retry_exceeded") + evt = await wait_for_event(transport, "protocolError") + assert evt["type"] in ( + "handshake_failed", + "conn_retry_exceeded", + ) 
finally: await transport.close() @@ -96,20 +86,10 @@ async def test_handshake_with_missing_metadata_emits_error( codec=NaiveJsonCodec(), handshake_metadata=None, ) - - error_event = asyncio.Event() - errors: list[dict] = [] - - def on_error(e: dict) -> None: - errors.append(e) - error_event.set() - - transport.add_event_listener("protocolError", on_error) - try: transport.connect("HANDSHAKE_SERVER") - await asyncio.wait_for(error_event.wait(), timeout=5.0) - assert len(errors) > 0 + evt = await wait_for_event(transport, "protocolError") + assert evt is not None finally: await transport.close() @@ -131,7 +111,8 @@ async def test_handshake_metadata_across_reconnect(self, handshake_server_url: s if ws is not None: await ws.close() - await asyncio.sleep(0.5) + # Wait for reconnect to complete + await wait_for_connected(client.transport, "HANDSHAKE_SERVER") result = await client.rpc("test", "echo", {"msg": "after-reconnect"}) assert result["ok"] is True diff --git a/python-client/tests/test_session.py b/python-client/tests/test_session.py index 24a1ac03..4e8a4326 100644 --- a/python-client/tests/test_session.py +++ b/python-client/tests/test_session.py @@ -14,6 +14,12 @@ from river.codec import NaiveJsonCodec from river.session import SessionOptions, SessionState from river.transport import WebSocketClientTransport +from tests.test_utils import ( + wait_for, + wait_for_connected, + wait_for_disconnected, + wait_for_session_gone, +) SHORT_OPTIONS = SessionOptions( heartbeat_interval_ms=100, @@ -58,7 +64,6 @@ async def test_ws_close_triggers_no_connection(self, server_url: str): """Force-closing WS transitions session to NO_CONNECTION.""" client = await make_client(server_url) try: - # Make an RPC to establish connection result = await client.rpc("test", "add", {"n": 1}) assert result["ok"] is True @@ -66,17 +71,10 @@ async def test_ws_close_triggers_no_connection(self, server_url: str): assert session is not None assert session.state == SessionState.CONNECTED - # 
Force-close the WS (not the transport) - ws = session._ws - assert ws is not None - # Disable reconnect so we can observe the state client.transport.reconnect_on_connection_drop = False - await ws.close() - - # Wait for the connection drop to be processed - await asyncio.sleep(0.3) + await session._ws.close() - assert session.state == SessionState.NO_CONNECTION + await wait_for_disconnected(client.transport) finally: await cleanup(client) @@ -85,11 +83,9 @@ async def test_active_rpcs_keep_alive(self, server_url: str): """Active RPCs reset heartbeat miss — no spurious disconnect.""" client = await make_client(server_url) try: - # Make several RPCs over a period longer than heartbeat_interval for _ in range(5): result = await client.rpc("test", "add", {"n": 1}) assert result["ok"] is True - await asyncio.sleep(0.05) session = client.transport.sessions.get("SERVER") assert session is not None @@ -106,39 +102,25 @@ async def test_active_rpcs_keep_alive(self, server_url: str): class TestGracePeriod: @pytest.mark.asyncio async def test_grace_period_expiry_destroys_session(self, server_url: str): - """Session destroyed after grace period elapses without reconnect.""" + """Session destroyed after grace period elapses.""" client = await make_client(server_url) try: result = await client.rpc("test", "add", {"n": 1}) assert result["ok"] is True + client.transport.reconnect_on_connection_drop = False session = client.transport.sessions.get("SERVER") assert session is not None - session_id = session.id - - # Force WS close and disable reconnect - client.transport.reconnect_on_connection_drop = False - ws = session._ws - assert ws is not None - await ws.close() - - # Wait for drop processing - await asyncio.sleep(0.1) - assert session.state == SessionState.NO_CONNECTION - - # Wait for grace period to elapse (300ms + buffer) - await asyncio.sleep(0.4) + await session._ws.close() - # Session should have been deleted - remaining = client.transport.sessions.get("SERVER") - assert 
remaining is None or remaining.id != session_id + await wait_for_disconnected(client.transport) + await wait_for_session_gone(client.transport) finally: await cleanup(client) @pytest.mark.asyncio async def test_reconnect_within_grace_preserves_session(self, server_url: str): """Reconnecting within grace period preserves the session.""" - # Use longer grace to ensure reconnect completes in time opts = SessionOptions( heartbeat_interval_ms=100, heartbeats_until_dead=2, @@ -153,21 +135,11 @@ async def test_reconnect_within_grace_preserves_session(self, server_url: str): session = client.transport.sessions.get("SERVER") assert session is not None + await session._ws.close() - # Force WS close — auto-reconnect is on by default - ws = session._ws - assert ws is not None - await ws.close() - - # Wait for reconnect to complete (well within 300ms grace) - await asyncio.sleep(0.5) - - # Session should still exist with same ID - new_session = client.transport.sessions.get("SERVER") - # Either same session or a new one (server may have lost state) - assert new_session is not None + # Auto-reconnect is on; wait for reconnection + await wait_for_connected(client.transport) - # Verify connection works result = await client.rpc("test", "add", {"n": 2}) assert result["ok"] is True finally: @@ -199,7 +171,6 @@ async def test_backoff_increases_on_failures(self, server_url: str): assert budget.has_budget() initial_backoff = budget.get_backoff_ms() - # Consume some budget to simulate failures budget.consume_budget() budget.consume_budget() budget.consume_budget() @@ -214,15 +185,11 @@ async def test_budget_restores_after_success(self, server_url: str): """Budget restores gradually after successful connection.""" client = await make_client(server_url) try: - # Make an RPC to trigger a successful connection result = await client.rpc("test", "add", {"n": 1}) assert result["ok"] is True budget = client.transport._retry_budget - # After a successful connection the budget_consumed should be 
- # restoring (or already at 0) - await asyncio.sleep(0.3) # wait for budget restore - assert budget.budget_consumed <= 1 # mostly restored + await wait_for(lambda: budget.budget_consumed <= 1, timeout=2.0) finally: await cleanup(client) @@ -238,31 +205,43 @@ class TestGracePeriodActiveProcedures: @pytest.mark.asyncio async def test_rpc_gets_disconnect_on_grace_expiry(self, server_url: str): - """RPC buffered during disconnect gets UNEXPECTED_DISCONNECT after grace.""" - client = await make_client(server_url) + """RPC buffered during disconnect gets UNEXPECTED_DISCONNECT.""" + transport = WebSocketClientTransport( + ws_url=server_url, + client_id=None, + server_id="SERVER", + codec=NaiveJsonCodec(), + options=SHORT_OPTIONS, + ) + client = RiverClient( + transport, + server_id="SERVER", + connect_on_invoke=False, + eagerly_connect=True, + ) try: - # Establish connection + await wait_for_connected(transport) + result = await client.rpc("test", "add", {"n": 1}) assert result["ok"] is True - # Kill connection, disable reconnect - client.transport.reconnect_on_connection_drop = False - session = client.transport.sessions.get("SERVER") + transport.reconnect_on_connection_drop = False + session = transport.sessions.get("SERVER") assert session is not None await session._ws.close() - await asyncio.sleep(0.1) - # Start an RPC while disconnected — message gets buffered + # Buffer an RPC on the disconnected session rpc_task = asyncio.create_task(client.rpc("test", "add", {"n": 2})) + await asyncio.sleep(0) # yield so task starts - # Wait for grace period (300ms) to expire - await asyncio.sleep(0.5) + # Grace period expires → session destroyed → RPC fails + await wait_for_session_gone(transport) result = await asyncio.wait_for(rpc_task, timeout=2.0) assert result["ok"] is False assert result["payload"]["code"] == "UNEXPECTED_DISCONNECT" finally: - await cleanup(client) + await transport.close() @pytest.mark.asyncio async def test_stream_gets_disconnect_on_grace_expiry(self, 
server_url: str): @@ -275,14 +254,12 @@ async def test_stream_gets_disconnect_on_grace_expiry(self, server_url: str): assert not done assert msg["ok"] is True - # Kill connection, disable reconnect client.transport.reconnect_on_connection_drop = False session = client.transport.sessions.get("SERVER") assert session is not None await session._ws.close() - # Wait for grace period to expire - await asyncio.sleep(0.5) + await wait_for_session_gone(client.transport) done, msg = await stream.res_readable.next() assert not done @@ -301,16 +278,14 @@ async def test_upload_gets_disconnect_on_grace_expiry(self, server_url: str): upload.req_writable.write({"n": 1}) # Ensure connection established - await asyncio.sleep(0.1) + await wait_for_connected(client.transport) - # Kill connection, disable reconnect client.transport.reconnect_on_connection_drop = False session = client.transport.sessions.get("SERVER") assert session is not None await session._ws.close() - # Wait for grace period to expire - await asyncio.sleep(0.5) + await wait_for_session_gone(client.transport) result = await asyncio.wait_for(upload.finalize(), timeout=2.0) assert result["ok"] is False @@ -328,14 +303,12 @@ async def test_subscription_gets_disconnect_on_grace_expiry(self, server_url: st assert not done assert msg["ok"] is True - # Kill connection, disable reconnect client.transport.reconnect_on_connection_drop = False session = client.transport.sessions.get("SERVER") assert session is not None await session._ws.close() - # Wait for grace period to expire - await asyncio.sleep(0.5) + await wait_for_session_gone(client.transport) done, msg = await sub.res_readable.next() assert not done @@ -363,20 +336,15 @@ async def test_rpc_after_grace_expiry_creates_new_session(self, server_url: str) assert old_session is not None old_id = old_session.id - # Kill connection, disable reconnect, wait for grace expiry client.transport.reconnect_on_connection_drop = False await old_session._ws.close() - await 
asyncio.sleep(0.5) - - # Session should be gone - assert client.transport.sessions.get("SERVER") is None + await wait_for_session_gone(client.transport) # Re-enable reconnect and make a new RPC client.transport.reconnect_on_connection_drop = True result = await client.rpc("test", "add", {"n": 2}) assert result["ok"] is True - # Should have a new session with a different ID new_session = client.transport.sessions.get("SERVER") assert new_session is not None assert new_session.id != old_id @@ -385,12 +353,7 @@ async def test_rpc_after_grace_expiry_creates_new_session(self, server_url: str) @pytest.mark.asyncio async def test_connect_on_invoke_false_no_reconnect(self, server_url: str): - """With connect_on_invoke=False, no reconnect after grace expiry. - - The RPC buffers the message but never connects, so it hangs. - We verify this by checking the transport state rather than - waiting for an RPC result. - """ + """With connect_on_invoke=False, no reconnect after grace expiry.""" transport = WebSocketClientTransport( ws_url=server_url, client_id=None, @@ -405,29 +368,20 @@ async def test_connect_on_invoke_false_no_reconnect(self, server_url: str): eagerly_connect=True, ) try: - # Wait for eager connection - await asyncio.sleep(0.3) - session = transport.sessions.get("SERVER") - assert session is not None + await wait_for_connected(transport) - # Make an RPC to verify connection works result = await client.rpc("test", "add", {"n": 1}) assert result["ok"] is True - # Kill connection, disable reconnect, wait for grace expiry transport.reconnect_on_connection_drop = False + session = transport.sessions.get("SERVER") + assert session is not None await session._ws.close() - await asyncio.sleep(0.5) - - # Session should be gone after grace expiry - assert transport.sessions.get("SERVER") is None + await wait_for_session_gone(transport) - # Re-enable reconnect on drop but connect_on_invoke is still false transport.reconnect_on_connection_drop = True - # With 
connect_on_invoke=False, the transport won't connect. - # Closing the transport now should produce UNEXPECTED_DISCONNECT - # for any pending procedures. + # Close transport; RPC on closed transport → UNEXPECTED_DISCONNECT await transport.close() result = await client.rpc("test", "add", {"n": 2}) diff --git a/python-client/tests/test_utils.py b/python-client/tests/test_utils.py new file mode 100644 index 00000000..4b80047e --- /dev/null +++ b/python-client/tests/test_utils.py @@ -0,0 +1,120 @@ +"""Test utilities for River Python client tests. + +Provides event-driven waiters to replace arbitrary sleeps. +""" + +from __future__ import annotations + +import asyncio +from typing import Any, Callable + +from river.session import SessionState +from river.transport import WebSocketClientTransport + + +async def wait_for( + predicate: Callable[[], bool], + *, + timeout: float = 5.0, + interval: float = 0.01, +) -> None: + """Poll a predicate until it returns True, or raise TimeoutError. + + Args: + predicate: Zero-arg callable returning bool. + timeout: Max seconds to wait. + interval: Seconds between polls. 
+ """ + deadline = asyncio.get_event_loop().time() + timeout + while not predicate(): + if asyncio.get_event_loop().time() > deadline: + raise TimeoutError(f"wait_for timed out after {timeout}s") + await asyncio.sleep(interval) + + +async def wait_for_session_state( + transport: WebSocketClientTransport, + server_id: str, + state: SessionState, + *, + timeout: float = 5.0, +) -> None: + """Wait until the session reaches the given state.""" + await wait_for( + lambda: ( + (s := transport.sessions.get(server_id)) is not None and s.state == state + ), + timeout=timeout, + ) + + +async def wait_for_connected( + transport: WebSocketClientTransport, + server_id: str = "SERVER", + *, + timeout: float = 5.0, +) -> None: + """Wait until session is CONNECTED.""" + await wait_for_session_state( + transport, server_id, SessionState.CONNECTED, timeout=timeout + ) + + +async def wait_for_session_gone( + transport: WebSocketClientTransport, + server_id: str = "SERVER", + *, + timeout: float = 5.0, +) -> None: + """Wait until the session for server_id no longer exists.""" + await wait_for( + lambda: transport.sessions.get(server_id) is None, + timeout=timeout, + ) + + +async def wait_for_disconnected( + transport: WebSocketClientTransport, + server_id: str = "SERVER", + *, + timeout: float = 5.0, +) -> None: + """Wait until session is NO_CONNECTION.""" + await wait_for_session_state( + transport, server_id, SessionState.NO_CONNECTION, timeout=timeout + ) + + +async def wait_for_event( + transport: WebSocketClientTransport, + event_name: str, + *, + timeout: float = 5.0, + predicate: Callable[[Any], bool] | None = None, +) -> Any: + """Wait for a specific event to be dispatched on the transport. + + Args: + transport: The transport to listen on. + event_name: Event name (e.g. "protocolError", "sessionStatus"). + timeout: Max seconds to wait. + predicate: Optional filter; if provided, only events matching + this predicate will resolve the wait. + + Returns: + The event data. 
+ """ + fut: asyncio.Future[Any] = asyncio.get_event_loop().create_future() + + def handler(data: Any) -> None: + if fut.done(): + return + if predicate is not None and not predicate(data): + return + fut.set_result(data) + + transport.add_event_listener(event_name, handler) + try: + return await asyncio.wait_for(fut, timeout=timeout) + finally: + transport.remove_event_listener(event_name, handler) From 6efd70a55c0ee1769424db056eb58a2b750d2170 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 19:08:25 -0800 Subject: [PATCH 12/29] comments --- python-client/river/codegen/emitter.py | 9 +- python-client/river/session.py | 7 +- python-client/river/streams.py | 1 + python-client/tests/extract_test_schema.mjs | 683 --- python-client/tests/test_server_handshake.mjs | 4537 ----------------- 5 files changed, 10 insertions(+), 5227 deletions(-) delete mode 100644 python-client/tests/extract_test_schema.mjs delete mode 100644 python-client/tests/test_server_handshake.mjs diff --git a/python-client/river/codegen/emitter.py b/python-client/river/codegen/emitter.py index 1bdf23c8..1db2c6b1 100644 --- a/python-client/river/codegen/emitter.py +++ b/python-client/river/codegen/emitter.py @@ -56,10 +56,11 @@ def _collect_used_type_names(svc: ServiceDef, ir: SchemaIR) -> list[str]: def _extract_names(annotation: str, known: set[str], out: set[str]) -> None: - for part in annotation.replace("|", " ").split(): - clean = part.strip("[]").strip() - if clean in known: - out.add(clean) + import re + + for name in re.findall(r"[A-Za-z_]\w*", annotation): + if name in known: + out.add(name) # --------------------------------------------------------------------------- diff --git a/python-client/river/session.py b/python-client/river/session.py index 7287a6b7..7a363b7d 100644 --- a/python-client/river/session.py +++ b/python-client/river/session.py @@ -34,7 +34,7 @@ class SessionState(str, Enum): CONNECTED = "Connected" -@dataclass +@dataclass(frozen=True) class SessionOptions: 
"""Configuration options for a session.""" @@ -141,9 +141,10 @@ def send(self, partial: PartialTransportMessage) -> tuple[bool, str]: if self.state == SessionState.CONNECTED and self._ws is not None: ok, result = self._send_over_wire(msg) if not ok: - # Remove the unsendable message from the buffer so it - # doesn't poison retransmit state. + # Roll back: remove the unsendable message from the buffer + # and restore seq so subsequent messages don't have a gap. self.send_buffer = [m for m in self.send_buffer if m.id != msg.id] + self.seq = msg.seq # restore to the seq we consumed return False, result return True, msg.id diff --git a/python-client/river/streams.py b/python-client/river/streams.py index 375c47f9..9ae18736 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -182,6 +182,7 @@ async def __anext__(self): def __del__(self): # Synchronous cleanup when the iterator is GC'd (e.g. break in for-await) + self._readable._broken = True self._readable._queue.clear() diff --git a/python-client/tests/extract_test_schema.mjs b/python-client/tests/extract_test_schema.mjs deleted file mode 100644 index 0cb6644b..00000000 --- a/python-client/tests/extract_test_schema.mjs +++ /dev/null @@ -1,683 +0,0 @@ -// python-client/tests/extract_test_schema.ts -import fs from "node:fs"; -import path from "node:path"; - -// router/services.ts -import { Type as Type2, Kind as Kind2 } from "@sinclair/typebox"; - -// router/errors.ts -import { - Kind, - Type -} from "@sinclair/typebox"; -var UNCAUGHT_ERROR_CODE = "UNCAUGHT_ERROR"; -var UNEXPECTED_DISCONNECT_CODE = "UNEXPECTED_DISCONNECT"; -var INVALID_REQUEST_CODE = "INVALID_REQUEST"; -var CANCEL_CODE = "CANCEL"; -var ErrResultSchema = (t) => Type.Object({ - ok: Type.Literal(false), - payload: t -}); -var ValidationErrorDetails = Type.Object({ - path: Type.String(), - message: Type.String() -}); -var ValidationErrors = Type.Array(ValidationErrorDetails); -var CancelErrorSchema = Type.Object({ - code: 
Type.Literal(CANCEL_CODE), - message: Type.String() -}); -var CancelResultSchema = ErrResultSchema(CancelErrorSchema); -var ReaderErrorSchema = Type.Union([ - Type.Object({ - code: Type.Literal(UNCAUGHT_ERROR_CODE), - message: Type.String() - }), - Type.Object({ - code: Type.Literal(UNEXPECTED_DISCONNECT_CODE), - message: Type.String() - }), - Type.Object({ - code: Type.Literal(INVALID_REQUEST_CODE), - message: Type.String(), - extras: Type.Optional( - Type.Object({ - firstValidationErrors: Type.Array(ValidationErrorDetails), - totalErrors: Type.Number() - }) - ) - }), - CancelErrorSchema -]); -var ReaderErrorResultSchema = ErrResultSchema(ReaderErrorSchema); -function isUnion(schema2) { - return schema2[Kind] === "Union"; -} -function flattenErrorType(errType) { - if (!isUnion(errType)) { - return errType; - } - const flattenedTypes = []; - function flatten(type) { - if (isUnion(type)) { - for (const t of type.anyOf) { - flatten(t); - } - } else { - flattenedTypes.push(type); - } - } - flatten(errType); - return Type.Union(flattenedTypes); -} - -// router/services.ts -function Strict(schema2) { - return JSON.parse(JSON.stringify(schema2)); -} -function serializeSchema(services2, handshakeSchema) { - const serializedServiceObject = Object.entries(services2).reduce((acc, [name, value]) => { - acc[name] = value.serialize(); - return acc; - }, {}); - const schema2 = { - services: serializedServiceObject - }; - if (handshakeSchema) { - schema2.handshakeSchema = Strict(handshakeSchema); - } - return schema2; -} -function createServiceSchema() { - return class ServiceSchema2 { - /** - * Factory function for creating a fresh state. - */ - initializeState; - /** - * The procedures for this service. - */ - procedures; - /** - * @param config - The configuration for this service. - * @param procedures - The procedures for this service. 
- */ - constructor(config, procedures) { - this.initializeState = config.initializeState; - this.procedures = procedures; - } - /** - * Creates a {@link ServiceScaffold}, which can be used to define procedures - * that can then be merged into a {@link ServiceSchema}, via the scaffold's - * `finalize` method. - * - * There are two patterns that work well with this method. The first is using - * it to separate the definition of procedures from the definition of the - * service's configuration: - * ```ts - * const MyServiceScaffold = ServiceSchema.scaffold({ - * initializeState: () => ({ count: 0 }), - * }); - * - * const incrementProcedures = MyServiceScaffold.procedures({ - * increment: Procedure.rpc({ - * requestInit: Type.Object({ amount: Type.Number() }), - * responseData: Type.Object({ current: Type.Number() }), - * async handler(ctx, init) { - * ctx.state.count += init.amount; - * return Ok({ current: ctx.state.count }); - * } - * }), - * }) - * - * const MyService = MyServiceScaffold.finalize({ - * ...incrementProcedures, - * // you can also directly define procedures here - * }); - * ``` - * This might be really handy if you have a very large service and you're - * wanting to split it over multiple files. You can define the scaffold - * in one file, and then import that scaffold in other files where you - * define procedures - and then finally import the scaffolds and your - * procedure objects in a final file where you finalize the scaffold into - * a service schema. 
- * - * The other way is to use it like in a builder pattern: - * ```ts - * const MyService = ServiceSchema - * .scaffold({ initializeState: () => ({ count: 0 }) }) - * .finalize({ - * increment: Procedure.rpc({ - * requestInit: Type.Object({ amount: Type.Number() }), - * responseData: Type.Object({ current: Type.Number() }), - * async handler(ctx, init) { - * ctx.state.count += init.amount; - * return Ok({ current: ctx.state.count }); - * } - * }), - * }) - * ``` - * Depending on your preferences, this may be a more appealing way to define - * a schema versus using the {@link ServiceSchema.define} method. - */ - static scaffold(config) { - return new ServiceScaffold(config); - } - // actual implementation - static define(configOrProcedures, maybeProcedures) { - let config; - let procedures; - if ("initializeState" in configOrProcedures && typeof configOrProcedures.initializeState === "function") { - if (!maybeProcedures) { - throw new Error("Expected procedures to be defined"); - } - config = configOrProcedures; - procedures = maybeProcedures; - } else { - config = { initializeState: () => ({}) }; - procedures = configOrProcedures; - } - return new ServiceSchema2(config, procedures); - } - /** - * Serializes this schema's procedures into a plain object that is JSON compatible. - */ - serialize() { - return { - procedures: Object.fromEntries( - Object.entries(this.procedures).map(([procName, procDef]) => [ - procName, - { - init: Strict(procDef.requestInit), - output: Strict(procDef.responseData), - errors: getSerializedProcErrors(procDef), - // Only add `description` field if the type declares it. - ..."description" in procDef ? { description: procDef.description } : {}, - type: procDef.type, - // Only add the `input` field if the type declares it. - ..."requestData" in procDef ? 
{ - input: Strict(procDef.requestData) - } : {} - } - ]) - ) - }; - } - // TODO remove once clients migrate to v2 - /** - * Same as {@link ServiceSchema.serialize}, but with a format that is compatible with - * protocol v1. This is useful to be able to continue to generate schemas for older - * clients as they are still supported. - */ - serializeV1Compat() { - return { - procedures: Object.fromEntries( - Object.entries(this.procedures).map( - ([procName, procDef]) => { - if (procDef.type === "rpc" || procDef.type === "subscription") { - return [ - procName, - { - // BACKWARDS COMPAT: map init to input for protocolv1 - // this is the only change needed to make it compatible. - input: Strict(procDef.requestInit), - output: Strict(procDef.responseData), - errors: getSerializedProcErrors(procDef), - // Only add `description` field if the type declares it. - ..."description" in procDef ? { description: procDef.description } : {}, - type: procDef.type - } - ]; - } - return [ - procName, - { - init: Strict(procDef.requestInit), - output: Strict(procDef.responseData), - errors: getSerializedProcErrors(procDef), - // Only add `description` field if the type declares it. - ..."description" in procDef ? { description: procDef.description } : {}, - type: procDef.type, - input: Strict(procDef.requestData) - } - ]; - } - ) - ) - }; - } - /** - * Instantiates this schema into a {@link Service} object. - * - * You probably don't need this, usually the River server will handle this - * for you. 
- */ - instantiate(extendedContext) { - const state = this.initializeState(extendedContext); - const dispose = async () => { - await state[Symbol.asyncDispose]?.(); - state[Symbol.dispose]?.(); - }; - return Object.freeze({ - state, - procedures: this.procedures, - [Symbol.asyncDispose]: dispose - }); - } - }; -} -function getSerializedProcErrors(procDef) { - if (!("responseError" in procDef) || procDef.responseError[Kind2] === "Never") { - return Strict(ReaderErrorSchema); - } - const withProtocolErrors = flattenErrorType( - Type2.Union([procDef.responseError, ReaderErrorSchema]) - ); - return Strict(withProtocolErrors); -} -var ServiceScaffold = class { - /** - * The configuration for this service. - */ - config; - /** - * @param config - The configuration for this service. - */ - constructor(config) { - this.config = config; - } - /** - * Define procedures for this service. Use the {@link Procedure} constructors - * to create them. This returns the procedures object, which can then be - * passed to {@link ServiceSchema.finalize} to create a {@link ServiceSchema}. - * - * @example - * ``` - * const myProcedures = MyServiceScaffold.procedures({ - * myRPC: Procedure.rpc({ - * // ... - * }), - * }); - * - * const MyService = MyServiceScaffold.finalize({ - * ...myProcedures, - * }); - * ``` - * - * @param procedures - The procedures for this service. - */ - procedures(procedures) { - return procedures; - } - /** - * Finalizes the scaffold into a {@link ServiceSchema}. This is where you - * provide the service's procedures and get a {@link ServiceSchema} in return. - * - * You can directly define procedures here, or you can define them separately - * with the {@link ServiceScaffold.procedures} method, and then pass them here. - * - * @example - * ``` - * const MyService = MyServiceScaffold.finalize({ - * myRPC: Procedure.rpc({ - * // ... - * }), - * // e.g. 
from the procedures method - * ...myOtherProcedures, - * }); - * ``` - */ - finalize(procedures) { - return createServiceSchema().define( - this.config, - procedures - ); - } -}; - -// router/result.ts -import { Type as Type3 } from "@sinclair/typebox"; -var AnyResultSchema = Type3.Union([ - Type3.Object({ - ok: Type3.Literal(false), - payload: Type3.Object({ - code: Type3.String(), - message: Type3.String(), - extras: Type3.Optional(Type3.Unknown()) - }) - }), - Type3.Object({ - ok: Type3.Literal(true), - payload: Type3.Unknown() - }) -]); -function Ok(payload) { - return { - ok: true, - payload - }; -} - -// router/procedures.ts -import { Type as Type4 } from "@sinclair/typebox"; -function rpc({ - requestInit, - responseData, - responseError = Type4.Never(), - description, - handler -}) { - return { - ...description ? { description } : {}, - type: "rpc", - requestInit, - responseData, - responseError, - handler - }; -} -function upload({ - requestInit, - requestData, - responseData, - responseError = Type4.Never(), - description, - handler -}) { - return { - type: "upload", - ...description ? { description } : {}, - requestInit, - requestData, - responseData, - responseError, - handler - }; -} -function subscription({ - requestInit, - responseData, - responseError = Type4.Never(), - description, - handler -}) { - return { - type: "subscription", - ...description ? { description } : {}, - requestInit, - responseData, - responseError, - handler - }; -} -function stream({ - requestInit, - requestData, - responseData, - responseError = Type4.Never(), - description, - handler -}) { - return { - type: "stream", - ...description ? 
{ description } : {}, - requestInit, - requestData, - responseData, - responseError, - handler - }; -} -var Procedure = { - rpc, - upload, - subscription, - stream -}; - -// python-client/tests/extract_test_schema.ts -import { Type as Type5 } from "@sinclair/typebox"; -var ServiceSchema = createServiceSchema(); -var TestServiceSchema = ServiceSchema.define({ - add: Procedure.rpc({ - requestInit: Type5.Object({ n: Type5.Number() }), - responseData: Type5.Object({ result: Type5.Number() }), - responseError: Type5.Never(), - async handler({ reqInit }) { - return Ok({ result: reqInit.n }); - } - }), - echo: Procedure.stream({ - requestInit: Type5.Object({}), - requestData: Type5.Object({ - msg: Type5.String(), - ignore: Type5.Optional(Type5.Boolean()) - }), - responseData: Type5.Object({ response: Type5.String() }), - responseError: Type5.Never(), - async handler({ resWritable }) { - resWritable.close(); - } - }), - echoWithPrefix: Procedure.stream({ - requestInit: Type5.Object({ prefix: Type5.String() }), - requestData: Type5.Object({ - msg: Type5.String(), - ignore: Type5.Optional(Type5.Boolean()) - }), - responseData: Type5.Object({ response: Type5.String() }), - responseError: Type5.Never(), - async handler({ resWritable }) { - resWritable.close(); - } - }), - echoBinary: Procedure.rpc({ - requestInit: Type5.Object({ data: Type5.Uint8Array() }), - responseData: Type5.Object({ - data: Type5.Uint8Array(), - length: Type5.Number() - }), - responseError: Type5.Never(), - async handler({ reqInit }) { - return Ok({ data: reqInit.data, length: reqInit.data.length }); - } - }) -}); -var OrderingServiceSchema = ServiceSchema.define({ - add: Procedure.rpc({ - requestInit: Type5.Object({ n: Type5.Number() }), - responseData: Type5.Object({ n: Type5.Number() }), - responseError: Type5.Never(), - async handler({ reqInit }) { - return Ok({ n: reqInit.n }); - } - }), - getAll: Procedure.rpc({ - requestInit: Type5.Object({}), - responseData: Type5.Object({ msgs: 
Type5.Array(Type5.Number()) }), - responseError: Type5.Never(), - async handler(_ctx) { - return Ok({ msgs: [] }); - } - }) -}); -var FallibleServiceSchema = ServiceSchema.define({ - divide: Procedure.rpc({ - requestInit: Type5.Object({ a: Type5.Number(), b: Type5.Number() }), - responseData: Type5.Object({ result: Type5.Number() }), - responseError: Type5.Union([ - Type5.Object({ - code: Type5.Literal("DIV_BY_ZERO"), - message: Type5.String() - }), - Type5.Object({ - code: Type5.Literal("INFINITY"), - message: Type5.String() - }) - ]), - async handler({ reqInit }) { - return Ok({ result: reqInit.a / reqInit.b }); - } - }), - echo: Procedure.stream({ - requestInit: Type5.Object({}), - requestData: Type5.Object({ - msg: Type5.String(), - throwResult: Type5.Optional(Type5.Boolean()), - throwError: Type5.Optional(Type5.Boolean()) - }), - responseData: Type5.Object({ response: Type5.String() }), - responseError: Type5.Object({ - code: Type5.Literal("STREAM_ERROR"), - message: Type5.String() - }), - async handler({ resWritable }) { - resWritable.close(); - } - }) -}); -var SubscribableServiceSchema = ServiceSchema.define({ - add: Procedure.rpc({ - requestInit: Type5.Object({ n: Type5.Number() }), - responseData: Type5.Object({ result: Type5.Number() }), - responseError: Type5.Never(), - async handler({ reqInit }) { - return Ok({ result: reqInit.n }); - } - }), - value: Procedure.subscription({ - requestInit: Type5.Object({}), - responseData: Type5.Object({ count: Type5.Number() }), - responseError: Type5.Never(), - async handler({ resWritable }) { - resWritable.write(Ok({ count: 0 })); - resWritable.close(); - } - }) -}); -var UploadableServiceSchema = ServiceSchema.define({ - addMultiple: Procedure.upload({ - requestInit: Type5.Object({}), - requestData: Type5.Object({ n: Type5.Number() }), - responseData: Type5.Object({ result: Type5.Number() }), - responseError: Type5.Never(), - async handler(_ctx) { - return Ok({ result: 0 }); - } - }), - addMultipleWithPrefix: 
Procedure.upload({ - requestInit: Type5.Object({ prefix: Type5.String() }), - requestData: Type5.Object({ n: Type5.Number() }), - responseData: Type5.Object({ result: Type5.String() }), - responseError: Type5.Never(), - async handler(_ctx) { - return Ok({ result: "" }); - } - }), - cancellableAdd: Procedure.upload({ - requestInit: Type5.Object({}), - requestData: Type5.Object({ n: Type5.Number() }), - responseData: Type5.Object({ result: Type5.Number() }), - responseError: Type5.Object({ - code: Type5.Literal("CANCEL"), - message: Type5.String() - }), - async handler(_ctx) { - return Ok({ result: 0 }); - } - }) -}); -var CancellationServiceSchema = ServiceSchema.define({ - blockingRpc: Procedure.rpc({ - requestInit: Type5.Object({}), - responseData: Type5.Object({}), - responseError: Type5.Never(), - async handler(_ctx) { - return Ok({}); - } - }), - blockingStream: Procedure.stream({ - requestInit: Type5.Object({}), - requestData: Type5.Object({}), - responseData: Type5.Object({}), - responseError: Type5.Never(), - async handler({ resWritable }) { - resWritable.close(); - } - }), - blockingUpload: Procedure.upload({ - requestInit: Type5.Object({}), - requestData: Type5.Object({}), - responseData: Type5.Object({}), - responseError: Type5.Never(), - async handler(_ctx) { - return Ok({}); - } - }), - blockingSubscription: Procedure.subscription({ - requestInit: Type5.Object({}), - responseData: Type5.Object({}), - responseError: Type5.Never(), - async handler({ resWritable }) { - resWritable.close(); - } - }), - immediateRpc: Procedure.rpc({ - requestInit: Type5.Object({}), - responseData: Type5.Object({ done: Type5.Boolean() }), - responseError: Type5.Never(), - async handler(_ctx) { - return Ok({ done: true }); - } - }), - immediateStream: Procedure.stream({ - requestInit: Type5.Object({}), - requestData: Type5.Object({}), - responseData: Type5.Object({ done: Type5.Boolean() }), - responseError: Type5.Never(), - async handler({ resWritable }) { - 
resWritable.close(); - } - }), - immediateUpload: Procedure.upload({ - requestInit: Type5.Object({}), - requestData: Type5.Object({}), - responseData: Type5.Object({ done: Type5.Boolean() }), - responseError: Type5.Never(), - async handler(_ctx) { - return Ok({ done: true }); - } - }), - immediateSubscription: Procedure.subscription({ - requestInit: Type5.Object({}), - responseData: Type5.Object({ done: Type5.Boolean() }), - responseError: Type5.Never(), - async handler({ resWritable }) { - resWritable.close(); - } - }), - countedStream: Procedure.stream({ - requestInit: Type5.Object({ total: Type5.Number() }), - requestData: Type5.Object({}), - responseData: Type5.Object({ i: Type5.Number() }), - responseError: Type5.Never(), - async handler({ resWritable }) { - resWritable.close(); - } - }) -}); -var services = { - test: TestServiceSchema, - ordering: OrderingServiceSchema, - fallible: FallibleServiceSchema, - subscribable: SubscribableServiceSchema, - uploadable: UploadableServiceSchema, - cancel: CancellationServiceSchema -}; -var schema = serializeSchema(services); -var outPath = path.join( - path.dirname(new URL(import.meta.url).pathname), - "test_schema.json" -); -fs.writeFileSync(outPath, JSON.stringify(schema, null, 2)); -console.log(`Wrote schema to ${outPath}`); diff --git a/python-client/tests/test_server_handshake.mjs b/python-client/tests/test_server_handshake.mjs deleted file mode 100644 index 1a113014..00000000 --- a/python-client/tests/test_server_handshake.mjs +++ /dev/null @@ -1,4537 +0,0 @@ -// python-client/tests/test_server_handshake.ts -import http from "node:http"; -import { WebSocketServer } from "ws"; - -// node_modules/nanoid/index.js -import { webcrypto as crypto } from "node:crypto"; -var POOL_SIZE_MULTIPLIER = 128; -var pool; -var poolOffset; -function fillPool(bytes) { - if (!pool || pool.length < bytes) { - pool = Buffer.allocUnsafe(bytes * POOL_SIZE_MULTIPLIER); - crypto.getRandomValues(pool); - poolOffset = 0; - } else if 
(poolOffset + bytes > pool.length) { - crypto.getRandomValues(pool); - poolOffset = 0; - } - poolOffset += bytes; -} -function random(bytes) { - fillPool(bytes |= 0); - return pool.subarray(poolOffset - bytes, poolOffset); -} -function customRandom(alphabet2, defaultSize, getRandom) { - let mask = (2 << 31 - Math.clz32(alphabet2.length - 1 | 1)) - 1; - let step = Math.ceil(1.6 * mask * defaultSize / alphabet2.length); - return (size = defaultSize) => { - let id = ""; - while (true) { - let bytes = getRandom(step); - let i = step; - while (i--) { - id += alphabet2[bytes[i] & mask] || ""; - if (id.length >= size) return id; - } - } - }; -} -function customAlphabet(alphabet2, size = 21) { - return customRandom(alphabet2, size, random); -} - -// transport/id.ts -var alphabet = customAlphabet( - "1234567890abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVXYZ" -); -var generateId = () => alphabet(12); - -// transport/connection.ts -var Connection = class { - id; - telemetry; - constructor() { - this.id = `conn-${generateId()}`; - } - get loggingMetadata() { - const metadata = { connId: this.id }; - if (this.telemetry?.span.isRecording()) { - const spanContext = this.telemetry.span.spanContext(); - metadata.telemetry = { - traceId: spanContext.traceId, - spanId: spanContext.spanId - }; - } - return metadata; - } - dataListener; - closeListener; - errorListener; - onData(msg) { - this.dataListener?.(msg); - } - onError(err) { - this.errorListener?.(err); - } - onClose() { - this.closeListener?.(); - this.telemetry?.span.end(); - } - /** - * Set the callback for when a message is received. - * @param cb The message handler callback. - */ - setDataListener(cb) { - this.dataListener = cb; - } - removeDataListener() { - this.dataListener = void 0; - } - /** - * Set the callback for when the connection is closed. - * This should also be called if an error happens and after notifying the error listener. - * @param cb The callback to call when the connection is closed. 
- */ - setCloseListener(cb) { - this.closeListener = cb; - } - removeCloseListener() { - this.closeListener = void 0; - } - /** - * Set the callback for when an error is received. - * This should only be used for logging errors, all cleanup - * should be delegated to setCloseListener. - * - * The implementer should take care such that the implemented - * connection will call both the close and error callbacks - * on an error. - * - * @param cb The callback to call when an error is received. - */ - setErrorListener(cb) { - this.errorListener = cb; - } - removeErrorListener() { - this.errorListener = void 0; - } -}; - -// transport/impls/ws/connection.ts -var WS_HEALTHY_CLOSE_CODE = 1e3; -var WebSocketCloseError = class extends Error { - code; - reason; - constructor(code, reason) { - super(`websocket closed with code and reason: ${code} - ${reason}`); - this.code = code; - this.reason = reason; - } -}; -var WebSocketConnection = class extends Connection { - ws; - extras; - get loggingMetadata() { - const metadata = super.loggingMetadata; - if (this.extras) { - metadata.extras = this.extras; - } - return metadata; - } - constructor(ws, extras) { - super(); - this.ws = ws; - this.extras = extras; - this.ws.binaryType = "arraybuffer"; - let didError = false; - this.ws.onerror = () => { - didError = true; - }; - this.ws.onclose = ({ code, reason }) => { - if (didError) { - const err = new WebSocketCloseError(code, reason); - this.onError(err); - } - this.onClose(); - }; - this.ws.onmessage = (msg) => { - this.onData(msg.data); - }; - } - send(payload) { - try { - this.ws.send(payload); - return true; - } catch { - return false; - } - } - close() { - this.ws.close(WS_HEALTHY_CLOSE_CODE); - } -}; - -// node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js -var _globalThis = typeof globalThis === "object" ? 
globalThis : global; - -// node_modules/@opentelemetry/api/build/esm/version.js -var VERSION = "1.8.0"; - -// node_modules/@opentelemetry/api/build/esm/internal/semver.js -var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/; -function _makeCompatibilityCheck(ownVersion) { - var acceptedVersions = /* @__PURE__ */ new Set([ownVersion]); - var rejectedVersions = /* @__PURE__ */ new Set(); - var myVersionMatch = ownVersion.match(re); - if (!myVersionMatch) { - return function() { - return false; - }; - } - var ownVersionParsed = { - major: +myVersionMatch[1], - minor: +myVersionMatch[2], - patch: +myVersionMatch[3], - prerelease: myVersionMatch[4] - }; - if (ownVersionParsed.prerelease != null) { - return function isExactmatch(globalVersion) { - return globalVersion === ownVersion; - }; - } - function _reject(v) { - rejectedVersions.add(v); - return false; - } - function _accept(v) { - acceptedVersions.add(v); - return true; - } - return function isCompatible2(globalVersion) { - if (acceptedVersions.has(globalVersion)) { - return true; - } - if (rejectedVersions.has(globalVersion)) { - return false; - } - var globalVersionMatch = globalVersion.match(re); - if (!globalVersionMatch) { - return _reject(globalVersion); - } - var globalVersionParsed = { - major: +globalVersionMatch[1], - minor: +globalVersionMatch[2], - patch: +globalVersionMatch[3], - prerelease: globalVersionMatch[4] - }; - if (globalVersionParsed.prerelease != null) { - return _reject(globalVersion); - } - if (ownVersionParsed.major !== globalVersionParsed.major) { - return _reject(globalVersion); - } - if (ownVersionParsed.major === 0) { - if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) { - return _accept(globalVersion); - } - return _reject(globalVersion); - } - if (ownVersionParsed.minor <= globalVersionParsed.minor) { - return _accept(globalVersion); - } - return _reject(globalVersion); - }; -} -var isCompatible = _makeCompatibilityCheck(VERSION); 
- -// node_modules/@opentelemetry/api/build/esm/internal/global-utils.js -var major = VERSION.split(".")[0]; -var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major); -var _global = _globalThis; -function registerGlobal(type, instance, diag2, allowOverride) { - var _a; - if (allowOverride === void 0) { - allowOverride = false; - } - var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : { - version: VERSION - }; - if (!allowOverride && api[type]) { - var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type); - diag2.error(err.stack || err.message); - return false; - } - if (api.version !== VERSION) { - var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION); - diag2.error(err.stack || err.message); - return false; - } - api[type] = instance; - diag2.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + "."); - return true; -} -function getGlobal(type) { - var _a, _b; - var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version; - if (!globalVersion || !isCompatible(globalVersion)) { - return; - } - return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? 
void 0 : _b[type]; -} -function unregisterGlobal(type, diag2) { - diag2.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + "."); - var api = _global[GLOBAL_OPENTELEMETRY_API_KEY]; - if (api) { - delete api[type]; - } -} - -// node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js -var __read = function(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } catch (error) { - e = { error }; - } finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } finally { - if (e) throw e.error; - } - } - return ar; -}; -var __spreadArray = function(to, from, pack) { - if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; - } - } - return to.concat(ar || Array.prototype.slice.call(from)); -}; -var DiagComponentLogger = ( - /** @class */ - function() { - function DiagComponentLogger2(props) { - this._namespace = props.namespace || "DiagComponentLogger"; - } - DiagComponentLogger2.prototype.debug = function() { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return logProxy("debug", this._namespace, args); - }; - DiagComponentLogger2.prototype.error = function() { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return logProxy("error", this._namespace, args); - }; - DiagComponentLogger2.prototype.info = function() { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return logProxy("info", this._namespace, args); - }; - DiagComponentLogger2.prototype.warn = function() { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return 
logProxy("warn", this._namespace, args); - }; - DiagComponentLogger2.prototype.verbose = function() { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - return logProxy("verbose", this._namespace, args); - }; - return DiagComponentLogger2; - }() -); -function logProxy(funcName, namespace, args) { - var logger = getGlobal("diag"); - if (!logger) { - return; - } - args.unshift(namespace); - return logger[funcName].apply(logger, __spreadArray([], __read(args), false)); -} - -// node_modules/@opentelemetry/api/build/esm/diag/types.js -var DiagLogLevel; -(function(DiagLogLevel2) { - DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE"; - DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR"; - DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN"; - DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO"; - DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG"; - DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE"; - DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL"; -})(DiagLogLevel || (DiagLogLevel = {})); - -// node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js -function createLogLevelDiagLogger(maxLevel, logger) { - if (maxLevel < DiagLogLevel.NONE) { - maxLevel = DiagLogLevel.NONE; - } else if (maxLevel > DiagLogLevel.ALL) { - maxLevel = DiagLogLevel.ALL; - } - logger = logger || {}; - function _filterFunc(funcName, theLevel) { - var theFunc = logger[funcName]; - if (typeof theFunc === "function" && maxLevel >= theLevel) { - return theFunc.bind(logger); - } - return function() { - }; - } - return { - error: _filterFunc("error", DiagLogLevel.ERROR), - warn: _filterFunc("warn", DiagLogLevel.WARN), - info: _filterFunc("info", DiagLogLevel.INFO), - debug: _filterFunc("debug", DiagLogLevel.DEBUG), - verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE) - }; -} - -// node_modules/@opentelemetry/api/build/esm/api/diag.js -var __read2 = function(o, n) { - var m = typeof Symbol === "function" && 
o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } catch (error) { - e = { error }; - } finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } finally { - if (e) throw e.error; - } - } - return ar; -}; -var __spreadArray2 = function(to, from, pack) { - if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; - } - } - return to.concat(ar || Array.prototype.slice.call(from)); -}; -var API_NAME = "diag"; -var DiagAPI = ( - /** @class */ - function() { - function DiagAPI2() { - function _logProxy(funcName) { - return function() { - var args = []; - for (var _i = 0; _i < arguments.length; _i++) { - args[_i] = arguments[_i]; - } - var logger = getGlobal("diag"); - if (!logger) - return; - return logger[funcName].apply(logger, __spreadArray2([], __read2(args), false)); - }; - } - var self = this; - var setLogger = function(logger, optionsOrLogLevel) { - var _a, _b, _c; - if (optionsOrLogLevel === void 0) { - optionsOrLogLevel = { logLevel: DiagLogLevel.INFO }; - } - if (logger === self) { - var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation"); - self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message); - return false; - } - if (typeof optionsOrLogLevel === "number") { - optionsOrLogLevel = { - logLevel: optionsOrLogLevel - }; - } - var oldLogger = getGlobal("diag"); - var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger); - if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) { - var stack = (_c = new Error().stack) !== null && _c !== void 0 ? 
_c : ""; - oldLogger.warn("Current logger will be overwritten from " + stack); - newLogger.warn("Current logger will overwrite one already registered from " + stack); - } - return registerGlobal("diag", newLogger, self, true); - }; - self.setLogger = setLogger; - self.disable = function() { - unregisterGlobal(API_NAME, self); - }; - self.createComponentLogger = function(options) { - return new DiagComponentLogger(options); - }; - self.verbose = _logProxy("verbose"); - self.debug = _logProxy("debug"); - self.info = _logProxy("info"); - self.warn = _logProxy("warn"); - self.error = _logProxy("error"); - } - DiagAPI2.instance = function() { - if (!this._instance) { - this._instance = new DiagAPI2(); - } - return this._instance; - }; - return DiagAPI2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/baggage/internal/baggage-impl.js -var __read3 = function(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } catch (error) { - e = { error }; - } finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } finally { - if (e) throw e.error; - } - } - return ar; -}; -var __values = function(o) { - var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0; - if (m) return m.call(o); - if (o && typeof o.length === "number") return { - next: function() { - if (o && i >= o.length) o = void 0; - return { value: o && o[i++], done: !o }; - } - }; - throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined."); -}; -var BaggageImpl = ( - /** @class */ - function() { - function BaggageImpl2(entries) { - this._entries = entries ? 
new Map(entries) : /* @__PURE__ */ new Map(); - } - BaggageImpl2.prototype.getEntry = function(key) { - var entry = this._entries.get(key); - if (!entry) { - return void 0; - } - return Object.assign({}, entry); - }; - BaggageImpl2.prototype.getAllEntries = function() { - return Array.from(this._entries.entries()).map(function(_a) { - var _b = __read3(_a, 2), k = _b[0], v = _b[1]; - return [k, v]; - }); - }; - BaggageImpl2.prototype.setEntry = function(key, entry) { - var newBaggage = new BaggageImpl2(this._entries); - newBaggage._entries.set(key, entry); - return newBaggage; - }; - BaggageImpl2.prototype.removeEntry = function(key) { - var newBaggage = new BaggageImpl2(this._entries); - newBaggage._entries.delete(key); - return newBaggage; - }; - BaggageImpl2.prototype.removeEntries = function() { - var e_1, _a; - var keys = []; - for (var _i = 0; _i < arguments.length; _i++) { - keys[_i] = arguments[_i]; - } - var newBaggage = new BaggageImpl2(this._entries); - try { - for (var keys_1 = __values(keys), keys_1_1 = keys_1.next(); !keys_1_1.done; keys_1_1 = keys_1.next()) { - var key = keys_1_1.value; - newBaggage._entries.delete(key); - } - } catch (e_1_1) { - e_1 = { error: e_1_1 }; - } finally { - try { - if (keys_1_1 && !keys_1_1.done && (_a = keys_1.return)) _a.call(keys_1); - } finally { - if (e_1) throw e_1.error; - } - } - return newBaggage; - }; - BaggageImpl2.prototype.clear = function() { - return new BaggageImpl2(); - }; - return BaggageImpl2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/baggage/utils.js -var diag = DiagAPI.instance(); -function createBaggage(entries) { - if (entries === void 0) { - entries = {}; - } - return new BaggageImpl(new Map(Object.entries(entries))); -} - -// node_modules/@opentelemetry/api/build/esm/context/context.js -function createContextKey(description) { - return Symbol.for(description); -} -var BaseContext = ( - /** @class */ - /* @__PURE__ */ function() { - function BaseContext2(parentContext) { - var self = 
this; - self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map(); - self.getValue = function(key) { - return self._currentContext.get(key); - }; - self.setValue = function(key, value) { - var context2 = new BaseContext2(self._currentContext); - context2._currentContext.set(key, value); - return context2; - }; - self.deleteValue = function(key) { - var context2 = new BaseContext2(self._currentContext); - context2._currentContext.delete(key); - return context2; - }; - } - return BaseContext2; - }() -); -var ROOT_CONTEXT = new BaseContext(); - -// node_modules/@opentelemetry/api/build/esm/propagation/TextMapPropagator.js -var defaultTextMapGetter = { - get: function(carrier, key) { - if (carrier == null) { - return void 0; - } - return carrier[key]; - }, - keys: function(carrier) { - if (carrier == null) { - return []; - } - return Object.keys(carrier); - } -}; -var defaultTextMapSetter = { - set: function(carrier, key, value) { - if (carrier == null) { - return; - } - carrier[key] = value; - } -}; - -// node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js -var __read4 = function(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } catch (error) { - e = { error }; - } finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } finally { - if (e) throw e.error; - } - } - return ar; -}; -var __spreadArray3 = function(to, from, pack) { - if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; - } - } - return to.concat(ar || Array.prototype.slice.call(from)); -}; -var NoopContextManager = ( - /** @class */ - function() { - function NoopContextManager2() { - } - NoopContextManager2.prototype.active = function() { - 
return ROOT_CONTEXT; - }; - NoopContextManager2.prototype.with = function(_context, fn, thisArg) { - var args = []; - for (var _i = 3; _i < arguments.length; _i++) { - args[_i - 3] = arguments[_i]; - } - return fn.call.apply(fn, __spreadArray3([thisArg], __read4(args), false)); - }; - NoopContextManager2.prototype.bind = function(_context, target) { - return target; - }; - NoopContextManager2.prototype.enable = function() { - return this; - }; - NoopContextManager2.prototype.disable = function() { - return this; - }; - return NoopContextManager2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/api/context.js -var __read5 = function(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } catch (error) { - e = { error }; - } finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } finally { - if (e) throw e.error; - } - } - return ar; -}; -var __spreadArray4 = function(to, from, pack) { - if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; - } - } - return to.concat(ar || Array.prototype.slice.call(from)); -}; -var API_NAME2 = "context"; -var NOOP_CONTEXT_MANAGER = new NoopContextManager(); -var ContextAPI = ( - /** @class */ - function() { - function ContextAPI2() { - } - ContextAPI2.getInstance = function() { - if (!this._instance) { - this._instance = new ContextAPI2(); - } - return this._instance; - }; - ContextAPI2.prototype.setGlobalContextManager = function(contextManager) { - return registerGlobal(API_NAME2, contextManager, DiagAPI.instance()); - }; - ContextAPI2.prototype.active = function() { - return this._getContextManager().active(); - }; - ContextAPI2.prototype.with = function(context2, fn, thisArg) { - var _a; - var args = []; - for 
(var _i = 3; _i < arguments.length; _i++) { - args[_i - 3] = arguments[_i]; - } - return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read5(args), false)); - }; - ContextAPI2.prototype.bind = function(context2, target) { - return this._getContextManager().bind(context2, target); - }; - ContextAPI2.prototype._getContextManager = function() { - return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER; - }; - ContextAPI2.prototype.disable = function() { - this._getContextManager().disable(); - unregisterGlobal(API_NAME2, DiagAPI.instance()); - }; - return ContextAPI2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/trace/trace_flags.js -var TraceFlags; -(function(TraceFlags2) { - TraceFlags2[TraceFlags2["NONE"] = 0] = "NONE"; - TraceFlags2[TraceFlags2["SAMPLED"] = 1] = "SAMPLED"; -})(TraceFlags || (TraceFlags = {})); - -// node_modules/@opentelemetry/api/build/esm/trace/invalid-span-constants.js -var INVALID_SPANID = "0000000000000000"; -var INVALID_TRACEID = "00000000000000000000000000000000"; -var INVALID_SPAN_CONTEXT = { - traceId: INVALID_TRACEID, - spanId: INVALID_SPANID, - traceFlags: TraceFlags.NONE -}; - -// node_modules/@opentelemetry/api/build/esm/trace/NonRecordingSpan.js -var NonRecordingSpan = ( - /** @class */ - function() { - function NonRecordingSpan2(_spanContext) { - if (_spanContext === void 0) { - _spanContext = INVALID_SPAN_CONTEXT; - } - this._spanContext = _spanContext; - } - NonRecordingSpan2.prototype.spanContext = function() { - return this._spanContext; - }; - NonRecordingSpan2.prototype.setAttribute = function(_key, _value) { - return this; - }; - NonRecordingSpan2.prototype.setAttributes = function(_attributes) { - return this; - }; - NonRecordingSpan2.prototype.addEvent = function(_name, _attributes) { - return this; - }; - NonRecordingSpan2.prototype.setStatus = function(_status) { - return this; - }; - NonRecordingSpan2.prototype.updateName = function(_name) { - return this; - }; - 
NonRecordingSpan2.prototype.end = function(_endTime) { - }; - NonRecordingSpan2.prototype.isRecording = function() { - return false; - }; - NonRecordingSpan2.prototype.recordException = function(_exception, _time) { - }; - return NonRecordingSpan2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/trace/context-utils.js -var SPAN_KEY = createContextKey("OpenTelemetry Context Key SPAN"); -function getSpan(context2) { - return context2.getValue(SPAN_KEY) || void 0; -} -function getActiveSpan() { - return getSpan(ContextAPI.getInstance().active()); -} -function setSpan(context2, span) { - return context2.setValue(SPAN_KEY, span); -} -function deleteSpan(context2) { - return context2.deleteValue(SPAN_KEY); -} -function setSpanContext(context2, spanContext) { - return setSpan(context2, new NonRecordingSpan(spanContext)); -} -function getSpanContext(context2) { - var _a; - return (_a = getSpan(context2)) === null || _a === void 0 ? void 0 : _a.spanContext(); -} - -// node_modules/@opentelemetry/api/build/esm/trace/spancontext-utils.js -var VALID_TRACEID_REGEX = /^([0-9a-f]{32})$/i; -var VALID_SPANID_REGEX = /^[0-9a-f]{16}$/i; -function isValidTraceId(traceId) { - return VALID_TRACEID_REGEX.test(traceId) && traceId !== INVALID_TRACEID; -} -function isValidSpanId(spanId) { - return VALID_SPANID_REGEX.test(spanId) && spanId !== INVALID_SPANID; -} -function isSpanContextValid(spanContext) { - return isValidTraceId(spanContext.traceId) && isValidSpanId(spanContext.spanId); -} -function wrapSpanContext(spanContext) { - return new NonRecordingSpan(spanContext); -} - -// node_modules/@opentelemetry/api/build/esm/trace/NoopTracer.js -var contextApi = ContextAPI.getInstance(); -var NoopTracer = ( - /** @class */ - function() { - function NoopTracer2() { - } - NoopTracer2.prototype.startSpan = function(name, options, context2) { - if (context2 === void 0) { - context2 = contextApi.active(); - } - var root = Boolean(options === null || options === void 0 ? 
void 0 : options.root); - if (root) { - return new NonRecordingSpan(); - } - var parentFromContext = context2 && getSpanContext(context2); - if (isSpanContext(parentFromContext) && isSpanContextValid(parentFromContext)) { - return new NonRecordingSpan(parentFromContext); - } else { - return new NonRecordingSpan(); - } - }; - NoopTracer2.prototype.startActiveSpan = function(name, arg2, arg3, arg4) { - var opts; - var ctx; - var fn; - if (arguments.length < 2) { - return; - } else if (arguments.length === 2) { - fn = arg2; - } else if (arguments.length === 3) { - opts = arg2; - fn = arg3; - } else { - opts = arg2; - ctx = arg3; - fn = arg4; - } - var parentContext = ctx !== null && ctx !== void 0 ? ctx : contextApi.active(); - var span = this.startSpan(name, opts, parentContext); - var contextWithSpanSet = setSpan(parentContext, span); - return contextApi.with(contextWithSpanSet, fn, void 0, span); - }; - return NoopTracer2; - }() -); -function isSpanContext(spanContext) { - return typeof spanContext === "object" && typeof spanContext["spanId"] === "string" && typeof spanContext["traceId"] === "string" && typeof spanContext["traceFlags"] === "number"; -} - -// node_modules/@opentelemetry/api/build/esm/trace/ProxyTracer.js -var NOOP_TRACER = new NoopTracer(); -var ProxyTracer = ( - /** @class */ - function() { - function ProxyTracer2(_provider, name, version2, options) { - this._provider = _provider; - this.name = name; - this.version = version2; - this.options = options; - } - ProxyTracer2.prototype.startSpan = function(name, options, context2) { - return this._getTracer().startSpan(name, options, context2); - }; - ProxyTracer2.prototype.startActiveSpan = function(_name, _options, _context, _fn) { - var tracer = this._getTracer(); - return Reflect.apply(tracer.startActiveSpan, tracer, arguments); - }; - ProxyTracer2.prototype._getTracer = function() { - if (this._delegate) { - return this._delegate; - } - var tracer = this._provider.getDelegateTracer(this.name, 
this.version, this.options); - if (!tracer) { - return NOOP_TRACER; - } - this._delegate = tracer; - return this._delegate; - }; - return ProxyTracer2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/trace/NoopTracerProvider.js -var NoopTracerProvider = ( - /** @class */ - function() { - function NoopTracerProvider2() { - } - NoopTracerProvider2.prototype.getTracer = function(_name, _version, _options) { - return new NoopTracer(); - }; - return NoopTracerProvider2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/trace/ProxyTracerProvider.js -var NOOP_TRACER_PROVIDER = new NoopTracerProvider(); -var ProxyTracerProvider = ( - /** @class */ - function() { - function ProxyTracerProvider2() { - } - ProxyTracerProvider2.prototype.getTracer = function(name, version2, options) { - var _a; - return (_a = this.getDelegateTracer(name, version2, options)) !== null && _a !== void 0 ? _a : new ProxyTracer(this, name, version2, options); - }; - ProxyTracerProvider2.prototype.getDelegate = function() { - var _a; - return (_a = this._delegate) !== null && _a !== void 0 ? _a : NOOP_TRACER_PROVIDER; - }; - ProxyTracerProvider2.prototype.setDelegate = function(delegate) { - this._delegate = delegate; - }; - ProxyTracerProvider2.prototype.getDelegateTracer = function(name, version2, options) { - var _a; - return (_a = this._delegate) === null || _a === void 0 ? 
void 0 : _a.getTracer(name, version2, options); - }; - return ProxyTracerProvider2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/trace/span_kind.js -var SpanKind; -(function(SpanKind2) { - SpanKind2[SpanKind2["INTERNAL"] = 0] = "INTERNAL"; - SpanKind2[SpanKind2["SERVER"] = 1] = "SERVER"; - SpanKind2[SpanKind2["CLIENT"] = 2] = "CLIENT"; - SpanKind2[SpanKind2["PRODUCER"] = 3] = "PRODUCER"; - SpanKind2[SpanKind2["CONSUMER"] = 4] = "CONSUMER"; -})(SpanKind || (SpanKind = {})); - -// node_modules/@opentelemetry/api/build/esm/trace/status.js -var SpanStatusCode; -(function(SpanStatusCode2) { - SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET"; - SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK"; - SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR"; -})(SpanStatusCode || (SpanStatusCode = {})); - -// node_modules/@opentelemetry/api/build/esm/context-api.js -var context = ContextAPI.getInstance(); - -// node_modules/@opentelemetry/api/build/esm/propagation/NoopTextMapPropagator.js -var NoopTextMapPropagator = ( - /** @class */ - function() { - function NoopTextMapPropagator2() { - } - NoopTextMapPropagator2.prototype.inject = function(_context, _carrier) { - }; - NoopTextMapPropagator2.prototype.extract = function(context2, _carrier) { - return context2; - }; - NoopTextMapPropagator2.prototype.fields = function() { - return []; - }; - return NoopTextMapPropagator2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/baggage/context-helpers.js -var BAGGAGE_KEY = createContextKey("OpenTelemetry Baggage Key"); -function getBaggage(context2) { - return context2.getValue(BAGGAGE_KEY) || void 0; -} -function getActiveBaggage() { - return getBaggage(ContextAPI.getInstance().active()); -} -function setBaggage(context2, baggage) { - return context2.setValue(BAGGAGE_KEY, baggage); -} -function deleteBaggage(context2) { - return context2.deleteValue(BAGGAGE_KEY); -} - -// node_modules/@opentelemetry/api/build/esm/api/propagation.js -var API_NAME3 = 
"propagation"; -var NOOP_TEXT_MAP_PROPAGATOR = new NoopTextMapPropagator(); -var PropagationAPI = ( - /** @class */ - function() { - function PropagationAPI2() { - this.createBaggage = createBaggage; - this.getBaggage = getBaggage; - this.getActiveBaggage = getActiveBaggage; - this.setBaggage = setBaggage; - this.deleteBaggage = deleteBaggage; - } - PropagationAPI2.getInstance = function() { - if (!this._instance) { - this._instance = new PropagationAPI2(); - } - return this._instance; - }; - PropagationAPI2.prototype.setGlobalPropagator = function(propagator) { - return registerGlobal(API_NAME3, propagator, DiagAPI.instance()); - }; - PropagationAPI2.prototype.inject = function(context2, carrier, setter) { - if (setter === void 0) { - setter = defaultTextMapSetter; - } - return this._getGlobalPropagator().inject(context2, carrier, setter); - }; - PropagationAPI2.prototype.extract = function(context2, carrier, getter) { - if (getter === void 0) { - getter = defaultTextMapGetter; - } - return this._getGlobalPropagator().extract(context2, carrier, getter); - }; - PropagationAPI2.prototype.fields = function() { - return this._getGlobalPropagator().fields(); - }; - PropagationAPI2.prototype.disable = function() { - unregisterGlobal(API_NAME3, DiagAPI.instance()); - }; - PropagationAPI2.prototype._getGlobalPropagator = function() { - return getGlobal(API_NAME3) || NOOP_TEXT_MAP_PROPAGATOR; - }; - return PropagationAPI2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/propagation-api.js -var propagation = PropagationAPI.getInstance(); - -// node_modules/@opentelemetry/api/build/esm/api/trace.js -var API_NAME4 = "trace"; -var TraceAPI = ( - /** @class */ - function() { - function TraceAPI2() { - this._proxyTracerProvider = new ProxyTracerProvider(); - this.wrapSpanContext = wrapSpanContext; - this.isSpanContextValid = isSpanContextValid; - this.deleteSpan = deleteSpan; - this.getSpan = getSpan; - this.getActiveSpan = getActiveSpan; - this.getSpanContext = 
getSpanContext; - this.setSpan = setSpan; - this.setSpanContext = setSpanContext; - } - TraceAPI2.getInstance = function() { - if (!this._instance) { - this._instance = new TraceAPI2(); - } - return this._instance; - }; - TraceAPI2.prototype.setGlobalTracerProvider = function(provider) { - var success = registerGlobal(API_NAME4, this._proxyTracerProvider, DiagAPI.instance()); - if (success) { - this._proxyTracerProvider.setDelegate(provider); - } - return success; - }; - TraceAPI2.prototype.getTracerProvider = function() { - return getGlobal(API_NAME4) || this._proxyTracerProvider; - }; - TraceAPI2.prototype.getTracer = function(name, version2) { - return this.getTracerProvider().getTracer(name, version2); - }; - TraceAPI2.prototype.disable = function() { - unregisterGlobal(API_NAME4, DiagAPI.instance()); - this._proxyTracerProvider = new ProxyTracerProvider(); - }; - return TraceAPI2; - }() -); - -// node_modules/@opentelemetry/api/build/esm/trace-api.js -var trace = TraceAPI.getInstance(); - -// transport/message.ts -import { Type } from "@sinclair/typebox"; -var TransportMessageSchema = (t) => Type.Object({ - id: Type.String(), - from: Type.String(), - to: Type.String(), - seq: Type.Integer(), - ack: Type.Integer(), - serviceName: Type.Optional(Type.String()), - procedureName: Type.Optional(Type.String()), - streamId: Type.String(), - controlFlags: Type.Integer(), - tracing: Type.Optional( - Type.Object({ - traceparent: Type.String(), - tracestate: Type.String() - }) - ), - payload: t -}); -var ControlMessageAckSchema = Type.Object({ - type: Type.Literal("ACK") -}); -var ControlMessageCloseSchema = Type.Object({ - type: Type.Literal("CLOSE") -}); -var currentProtocolVersion = "v2.0"; -var acceptedProtocolVersions = ["v1.1", currentProtocolVersion]; -function isAcceptedProtocolVersion(version2) { - return acceptedProtocolVersions.includes(version2); -} -var ControlMessageHandshakeRequestSchema = Type.Object({ - type: Type.Literal("HANDSHAKE_REQ"), - 
protocolVersion: Type.String(), - sessionId: Type.String(), - /** - * Specifies what the server's expected session state (from the pov of the client). This can be - * used by the server to know whether this is a new or a reestablished connection, and whether it - * is compatible with what it already has. - */ - expectedSessionState: Type.Object({ - // what the client expects the server to send next - nextExpectedSeq: Type.Integer(), - nextSentSeq: Type.Integer() - }), - metadata: Type.Optional(Type.Unknown()) -}); -var HandshakeErrorRetriableResponseCodes = Type.Union([ - Type.Literal("SESSION_STATE_MISMATCH") -]); -var HandshakeErrorCustomHandlerFatalResponseCodes = Type.Union([ - // The custom validation handler rejected the handler because the client is unsupported. - Type.Literal("REJECTED_UNSUPPORTED_CLIENT"), - // The custom validation handler rejected the handshake. - Type.Literal("REJECTED_BY_CUSTOM_HANDLER") -]); -var HandshakeErrorFatalResponseCodes = Type.Union([ - HandshakeErrorCustomHandlerFatalResponseCodes, - // The ciient sent a handshake that doesn't comply with the extended handshake metadata. - Type.Literal("MALFORMED_HANDSHAKE_META"), - // The ciient sent a handshake that doesn't comply with ControlMessageHandshakeRequestSchema. - Type.Literal("MALFORMED_HANDSHAKE"), - // The client's protocol version does not match the server's. 
- Type.Literal("PROTOCOL_VERSION_MISMATCH") -]); -var HandshakeErrorResponseCodes = Type.Union([ - HandshakeErrorRetriableResponseCodes, - HandshakeErrorFatalResponseCodes -]); -var ControlMessageHandshakeResponseSchema = Type.Object({ - type: Type.Literal("HANDSHAKE_RESP"), - status: Type.Union([ - Type.Object({ - ok: Type.Literal(true), - sessionId: Type.String() - }), - Type.Object({ - ok: Type.Literal(false), - reason: Type.String(), - code: HandshakeErrorResponseCodes - }) - ]) -}); -var ControlMessagePayloadSchema = Type.Union([ - ControlMessageCloseSchema, - ControlMessageAckSchema, - ControlMessageHandshakeRequestSchema, - ControlMessageHandshakeResponseSchema -]); -var OpaqueTransportMessageSchema = TransportMessageSchema( - Type.Unknown() -); -function handshakeResponseMessage({ - from, - to, - status -}) { - return { - id: generateId(), - from, - to, - seq: 0, - ack: 0, - streamId: generateId(), - controlFlags: 0, - payload: { - type: "HANDSHAKE_RESP", - status - } - }; -} -function closeStreamMessage(streamId) { - return { - streamId, - controlFlags: 8 /* StreamClosedBit */, - payload: { - type: "CLOSE" - } - }; -} -function cancelMessage(streamId, payload) { - return { - streamId, - controlFlags: 4 /* StreamCancelBit */, - payload - }; -} -function isAck(controlFlag) { - return (controlFlag & 1 /* AckBit */) === 1 /* AckBit */; -} -function isStreamOpen(controlFlag) { - return ( - /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ - (controlFlag & 2 /* StreamOpenBit */) === 2 /* StreamOpenBit */ - ); -} -function isStreamClose(controlFlag) { - return ( - /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ - (controlFlag & 8 /* StreamClosedBit */) === 8 /* StreamClosedBit */ - ); -} -function isStreamCancel(controlFlag) { - return ( - /* eslint-disable-next-line @typescript-eslint/no-unsafe-enum-comparison */ - (controlFlag & 4 /* StreamCancelBit */) === 4 /* StreamCancelBit */ - ); -} - -// codec/json.ts 
-var encoder = new TextEncoder(); -var decoder = new TextDecoder(); -function uint8ArrayToBase64(uint8Array) { - let binary = ""; - uint8Array.forEach((byte) => { - binary += String.fromCharCode(byte); - }); - return btoa(binary); -} -function base64ToUint8Array(base64) { - const binaryString = atob(base64); - const uint8Array = new Uint8Array(binaryString.length); - for (let i = 0; i < binaryString.length; i++) { - uint8Array[i] = binaryString.charCodeAt(i); - } - return uint8Array; -} -var NaiveJsonCodec = { - toBuffer: (obj) => { - return encoder.encode( - JSON.stringify(obj, function replacer(key) { - const val = this[key]; - if (val instanceof Uint8Array) { - return { $t: uint8ArrayToBase64(val) }; - } else if (typeof val === "bigint") { - return { $b: val.toString() }; - } else { - return val; - } - }) - ); - }, - fromBuffer: (buff) => { - const parsed = JSON.parse( - decoder.decode(buff), - function reviver(_key, val) { - if (val?.$t !== void 0) { - return base64ToUint8Array(val.$t); - } else if (val?.$b !== void 0) { - return BigInt(val.$b); - } else { - return val; - } - } - ); - if (typeof parsed !== "object" || parsed === null) { - throw new Error("unpacked msg is not an object"); - } - return parsed; - } -}; - -// transport/options.ts -var defaultTransportOptions = { - heartbeatIntervalMs: 1e3, - heartbeatsUntilDead: 2, - sessionDisconnectGraceMs: 5e3, - connectionTimeoutMs: 2e3, - handshakeTimeoutMs: 1e3, - enableTransparentSessionReconnects: true, - codec: NaiveJsonCodec -}; -var defaultConnectionRetryOptions = { - baseIntervalMs: 150, - maxJitterMs: 200, - maxBackoffMs: 32e3, - attemptBudgetCapacity: 5, - budgetRestoreIntervalMs: 200, - isFatalConnectionError: () => false -}; -var defaultClientTransportOptions = { - ...defaultTransportOptions, - ...defaultConnectionRetryOptions -}; -var defaultServerTransportOptions = { - ...defaultTransportOptions -}; - -// logging/log.ts -var LoggingLevels = { - debug: -1, - info: 0, - warn: 1, - error: 2 -}; -var 
cleanedLogFn = (log) => { - return (msg, metadata) => { - if (metadata && !metadata.telemetry) { - const span = trace.getSpan(context.active()); - if (span) { - metadata.telemetry = { - traceId: span.spanContext().traceId, - spanId: span.spanContext().spanId - }; - } - } - if (!metadata?.transportMessage) { - log(msg, metadata); - return; - } - const { payload, ...rest } = metadata.transportMessage; - metadata.transportMessage = rest; - log(msg, metadata); - }; -}; -var BaseLogger = class { - minLevel; - output; - constructor(output, minLevel = "info") { - this.minLevel = minLevel; - this.output = output; - } - debug(msg, metadata) { - if (LoggingLevels[this.minLevel] <= LoggingLevels.debug) { - this.output(msg, metadata ?? {}, "debug"); - } - } - info(msg, metadata) { - if (LoggingLevels[this.minLevel] <= LoggingLevels.info) { - this.output(msg, metadata ?? {}, "info"); - } - } - warn(msg, metadata) { - if (LoggingLevels[this.minLevel] <= LoggingLevels.warn) { - this.output(msg, metadata ?? {}, "warn"); - } - } - error(msg, metadata) { - if (LoggingLevels[this.minLevel] <= LoggingLevels.error) { - this.output(msg, metadata ?? {}, "error"); - } - } -}; -var createLogProxy = (log) => ({ - debug: cleanedLogFn(log.debug.bind(log)), - info: cleanedLogFn(log.info.bind(log)), - warn: cleanedLogFn(log.warn.bind(log)), - error: cleanedLogFn(log.error.bind(log)) -}); - -// transport/events.ts -var ProtocolError = { - RetriesExceeded: "conn_retry_exceeded", - HandshakeFailed: "handshake_failed", - MessageOrderingViolated: "message_ordering_violated", - InvalidMessage: "invalid_message", - MessageSendFailure: "message_send_failure" -}; -var EventDispatcher = class { - eventListeners = {}; - removeAllListeners() { - this.eventListeners = {}; - } - numberOfListeners(eventType) { - return this.eventListeners[eventType]?.size ?? 
0; - } - addEventListener(eventType, handler) { - if (!this.eventListeners[eventType]) { - this.eventListeners[eventType] = /* @__PURE__ */ new Set(); - } - this.eventListeners[eventType]?.add(handler); - } - removeEventListener(eventType, handler) { - const handlers = this.eventListeners[eventType]; - if (handlers) { - this.eventListeners[eventType]?.delete(handler); - } - } - dispatchEvent(eventType, event) { - const handlers = this.eventListeners[eventType]; - if (handlers) { - const copy = [...handlers]; - for (const handler of copy) { - handler(event); - } - } - } -}; - -// transport/sessionStateMachine/common.ts -var ERR_CONSUMED = `session state has been consumed and is no longer valid`; -var StateMachineState = class { - /* - * Whether this state has been consumed - * and we've moved on to another state - */ - _isConsumed; - /** - * Cleanup this state machine state and mark it as consumed. - * After calling close, it is an error to access any properties on the state. - * You should never need to call this as a consumer. - * - * If you're looking to close the session from the client, - * use `.hardDisconnect` on the client transport. 
- */ - close() { - this._handleClose(); - } - constructor() { - this._isConsumed = false; - return new Proxy(this, { - get(target, prop) { - if (prop === "_isConsumed" || prop === "id" || prop === "state") { - return Reflect.get(target, prop); - } - if (prop === "_handleStateExit") { - return () => { - target._isConsumed = true; - target._handleStateExit(); - }; - } - if (prop === "_handleClose") { - return () => { - target._isConsumed = true; - target._handleStateExit(); - target._handleClose(); - }; - } - if (target._isConsumed) { - throw new Error( - `${ERR_CONSUMED}: getting ${prop.toString()} on consumed state` - ); - } - return Reflect.get(target, prop); - }, - set(target, prop, value) { - if (target._isConsumed) { - throw new Error( - `${ERR_CONSUMED}: setting ${prop.toString()} on consumed state` - ); - } - return Reflect.set(target, prop, value); - } - }); - } -}; -var CommonSession = class extends StateMachineState { - from; - options; - codec; - tracer; - log; - constructor({ from, options, log, tracer, codec }) { - super(); - this.from = from; - this.options = options; - this.log = log; - this.tracer = tracer; - this.codec = codec; - } -}; -var IdentifiedSession = class extends CommonSession { - id; - telemetry; - to; - protocolVersion; - /** - * Index of the message we will send next (excluding handshake) - */ - seq; - /** - * Last seq we sent over the wire this session (excluding handshake) and retransmissions - */ - seqSent; - /** - * Number of unique messages we've received this session (excluding handshake) - */ - ack; - sendBuffer; - constructor(props) { - const { - id, - to, - seq, - ack, - sendBuffer, - telemetry, - log, - protocolVersion, - seqSent: messagesSent - } = props; - super(props); - this.id = id; - this.to = to; - this.seq = seq; - this.ack = ack; - this.sendBuffer = sendBuffer; - this.telemetry = telemetry; - this.log = log; - this.protocolVersion = protocolVersion; - this.seqSent = messagesSent; - } - get loggingMetadata() { - const 
metadata = { - clientId: this.from, - connectedTo: this.to, - sessionId: this.id - }; - if (this.telemetry.span.isRecording()) { - const spanContext = this.telemetry.span.spanContext(); - metadata.telemetry = { - traceId: spanContext.traceId, - spanId: spanContext.spanId - }; - } - return metadata; - } - constructMsg(partialMsg) { - const msg = { - ...partialMsg, - id: generateId(), - to: this.to, - from: this.from, - seq: this.seq, - ack: this.ack - }; - this.seq++; - return msg; - } - nextSeq() { - return this.sendBuffer.length > 0 ? this.sendBuffer[0].seq : this.seq; - } - send(msg) { - const constructedMsg = this.constructMsg(msg); - this.sendBuffer.push(constructedMsg); - return { - ok: true, - value: constructedMsg.id - }; - } - _handleStateExit() { - } - _handleClose() { - this.sendBuffer.length = 0; - this.telemetry.span.end(); - } -}; -var IdentifiedSessionWithGracePeriod = class extends IdentifiedSession { - graceExpiryTime; - gracePeriodTimeout; - listeners; - constructor(props) { - super(props); - this.listeners = props.listeners; - this.graceExpiryTime = props.graceExpiryTime; - this.gracePeriodTimeout = setTimeout(() => { - this.listeners.onSessionGracePeriodElapsed(); - }, this.graceExpiryTime - Date.now()); - } - _handleStateExit() { - super._handleStateExit(); - if (this.gracePeriodTimeout) { - clearTimeout(this.gracePeriodTimeout); - this.gracePeriodTimeout = void 0; - } - } - _handleClose() { - super._handleClose(); - } -}; -function sendMessage(conn, codec, msg) { - const buff = codec.toBuffer(msg); - if (!buff.ok) { - return buff; - } - const sent = conn.send(buff.value); - if (!sent) { - return { - ok: false, - reason: "failed to send message" - }; - } - return { - ok: true, - value: msg.id - }; -} - -// transport/sessionStateMachine/SessionConnecting.ts -var SessionConnecting = class extends IdentifiedSessionWithGracePeriod { - state = "Connecting" /* Connecting */; - connPromise; - listeners; - connectionTimeout; - constructor(props) { - 
super(props); - this.connPromise = props.connPromise; - this.listeners = props.listeners; - this.connPromise.then( - (conn) => { - if (this._isConsumed) return; - this.listeners.onConnectionEstablished(conn); - }, - (err) => { - if (this._isConsumed) return; - this.listeners.onConnectionFailed(err); - } - ); - this.connectionTimeout = setTimeout(() => { - this.listeners.onConnectionTimeout(); - }, this.options.connectionTimeoutMs); - } - // close a pending connection if it resolves, ignore errors if the promise - // ends up rejected anyways - bestEffortClose() { - const logger = this.log; - const metadata = this.loggingMetadata; - this.connPromise.then((conn) => { - conn.close(); - logger?.info( - "connection eventually resolved but session has transitioned, closed connection", - { - ...metadata, - ...conn.loggingMetadata - } - ); - }).catch(() => { - }); - } - _handleStateExit() { - super._handleStateExit(); - if (this.connectionTimeout) { - clearTimeout(this.connectionTimeout); - this.connectionTimeout = void 0; - } - } - _handleClose() { - super._handleClose(); - this.bestEffortClose(); - } -}; - -// transport/sessionStateMachine/SessionNoConnection.ts -var SessionNoConnection = class extends IdentifiedSessionWithGracePeriod { - state = "NoConnection" /* NoConnection */; - _handleClose() { - super._handleClose(); - } - _handleStateExit() { - super._handleStateExit(); - } -}; - -// router/services.ts -import { Type as Type3, Kind as Kind2 } from "@sinclair/typebox"; - -// router/errors.ts -import { - Kind, - Type as Type2 -} from "@sinclair/typebox"; -var UNCAUGHT_ERROR_CODE = "UNCAUGHT_ERROR"; -var UNEXPECTED_DISCONNECT_CODE = "UNEXPECTED_DISCONNECT"; -var INVALID_REQUEST_CODE = "INVALID_REQUEST"; -var CANCEL_CODE = "CANCEL"; -var ErrResultSchema = (t) => Type2.Object({ - ok: Type2.Literal(false), - payload: t -}); -var ValidationErrorDetails = Type2.Object({ - path: Type2.String(), - message: Type2.String() -}); -var ValidationErrors = 
Type2.Array(ValidationErrorDetails); -function castTypeboxValueErrors(errors) { - const result = []; - for (const error of errors) { - result.push({ - path: error.path, - message: error.message - }); - } - return result; -} -var CancelErrorSchema = Type2.Object({ - code: Type2.Literal(CANCEL_CODE), - message: Type2.String() -}); -var CancelResultSchema = ErrResultSchema(CancelErrorSchema); -var ReaderErrorSchema = Type2.Union([ - Type2.Object({ - code: Type2.Literal(UNCAUGHT_ERROR_CODE), - message: Type2.String() - }), - Type2.Object({ - code: Type2.Literal(UNEXPECTED_DISCONNECT_CODE), - message: Type2.String() - }), - Type2.Object({ - code: Type2.Literal(INVALID_REQUEST_CODE), - message: Type2.String(), - extras: Type2.Optional( - Type2.Object({ - firstValidationErrors: Type2.Array(ValidationErrorDetails), - totalErrors: Type2.Number() - }) - ) - }), - CancelErrorSchema -]); -var ReaderErrorResultSchema = ErrResultSchema(ReaderErrorSchema); -function isUnion(schema) { - return schema[Kind] === "Union"; -} -function flattenErrorType(errType) { - if (!isUnion(errType)) { - return errType; - } - const flattenedTypes = []; - function flatten(type) { - if (isUnion(type)) { - for (const t of type.anyOf) { - flatten(t); - } - } else { - flattenedTypes.push(type); - } - } - flatten(errType); - return Type2.Union(flattenedTypes); -} - -// router/services.ts -function Strict(schema) { - return JSON.parse(JSON.stringify(schema)); -} -function createServiceSchema() { - return class ServiceSchema2 { - /** - * Factory function for creating a fresh state. - */ - initializeState; - /** - * The procedures for this service. - */ - procedures; - /** - * @param config - The configuration for this service. - * @param procedures - The procedures for this service. 
- */ - constructor(config, procedures) { - this.initializeState = config.initializeState; - this.procedures = procedures; - } - /** - * Creates a {@link ServiceScaffold}, which can be used to define procedures - * that can then be merged into a {@link ServiceSchema}, via the scaffold's - * `finalize` method. - * - * There are two patterns that work well with this method. The first is using - * it to separate the definition of procedures from the definition of the - * service's configuration: - * ```ts - * const MyServiceScaffold = ServiceSchema.scaffold({ - * initializeState: () => ({ count: 0 }), - * }); - * - * const incrementProcedures = MyServiceScaffold.procedures({ - * increment: Procedure.rpc({ - * requestInit: Type.Object({ amount: Type.Number() }), - * responseData: Type.Object({ current: Type.Number() }), - * async handler(ctx, init) { - * ctx.state.count += init.amount; - * return Ok({ current: ctx.state.count }); - * } - * }), - * }) - * - * const MyService = MyServiceScaffold.finalize({ - * ...incrementProcedures, - * // you can also directly define procedures here - * }); - * ``` - * This might be really handy if you have a very large service and you're - * wanting to split it over multiple files. You can define the scaffold - * in one file, and then import that scaffold in other files where you - * define procedures - and then finally import the scaffolds and your - * procedure objects in a final file where you finalize the scaffold into - * a service schema. 
- * - * The other way is to use it like in a builder pattern: - * ```ts - * const MyService = ServiceSchema - * .scaffold({ initializeState: () => ({ count: 0 }) }) - * .finalize({ - * increment: Procedure.rpc({ - * requestInit: Type.Object({ amount: Type.Number() }), - * responseData: Type.Object({ current: Type.Number() }), - * async handler(ctx, init) { - * ctx.state.count += init.amount; - * return Ok({ current: ctx.state.count }); - * } - * }), - * }) - * ``` - * Depending on your preferences, this may be a more appealing way to define - * a schema versus using the {@link ServiceSchema.define} method. - */ - static scaffold(config) { - return new ServiceScaffold(config); - } - // actual implementation - static define(configOrProcedures, maybeProcedures) { - let config; - let procedures; - if ("initializeState" in configOrProcedures && typeof configOrProcedures.initializeState === "function") { - if (!maybeProcedures) { - throw new Error("Expected procedures to be defined"); - } - config = configOrProcedures; - procedures = maybeProcedures; - } else { - config = { initializeState: () => ({}) }; - procedures = configOrProcedures; - } - return new ServiceSchema2(config, procedures); - } - /** - * Serializes this schema's procedures into a plain object that is JSON compatible. - */ - serialize() { - return { - procedures: Object.fromEntries( - Object.entries(this.procedures).map(([procName, procDef]) => [ - procName, - { - init: Strict(procDef.requestInit), - output: Strict(procDef.responseData), - errors: getSerializedProcErrors(procDef), - // Only add `description` field if the type declares it. - ..."description" in procDef ? { description: procDef.description } : {}, - type: procDef.type, - // Only add the `input` field if the type declares it. - ..."requestData" in procDef ? 
{ - input: Strict(procDef.requestData) - } : {} - } - ]) - ) - }; - } - // TODO remove once clients migrate to v2 - /** - * Same as {@link ServiceSchema.serialize}, but with a format that is compatible with - * protocol v1. This is useful to be able to continue to generate schemas for older - * clients as they are still supported. - */ - serializeV1Compat() { - return { - procedures: Object.fromEntries( - Object.entries(this.procedures).map( - ([procName, procDef]) => { - if (procDef.type === "rpc" || procDef.type === "subscription") { - return [ - procName, - { - // BACKWARDS COMPAT: map init to input for protocolv1 - // this is the only change needed to make it compatible. - input: Strict(procDef.requestInit), - output: Strict(procDef.responseData), - errors: getSerializedProcErrors(procDef), - // Only add `description` field if the type declares it. - ..."description" in procDef ? { description: procDef.description } : {}, - type: procDef.type - } - ]; - } - return [ - procName, - { - init: Strict(procDef.requestInit), - output: Strict(procDef.responseData), - errors: getSerializedProcErrors(procDef), - // Only add `description` field if the type declares it. - ..."description" in procDef ? { description: procDef.description } : {}, - type: procDef.type, - input: Strict(procDef.requestData) - } - ]; - } - ) - ) - }; - } - /** - * Instantiates this schema into a {@link Service} object. - * - * You probably don't need this, usually the River server will handle this - * for you. 
- */ - instantiate(extendedContext) { - const state = this.initializeState(extendedContext); - const dispose = async () => { - await state[Symbol.asyncDispose]?.(); - state[Symbol.dispose]?.(); - }; - return Object.freeze({ - state, - procedures: this.procedures, - [Symbol.asyncDispose]: dispose - }); - } - }; -} -function getSerializedProcErrors(procDef) { - if (!("responseError" in procDef) || procDef.responseError[Kind2] === "Never") { - return Strict(ReaderErrorSchema); - } - const withProtocolErrors = flattenErrorType( - Type3.Union([procDef.responseError, ReaderErrorSchema]) - ); - return Strict(withProtocolErrors); -} -var ServiceScaffold = class { - /** - * The configuration for this service. - */ - config; - /** - * @param config - The configuration for this service. - */ - constructor(config) { - this.config = config; - } - /** - * Define procedures for this service. Use the {@link Procedure} constructors - * to create them. This returns the procedures object, which can then be - * passed to {@link ServiceSchema.finalize} to create a {@link ServiceSchema}. - * - * @example - * ``` - * const myProcedures = MyServiceScaffold.procedures({ - * myRPC: Procedure.rpc({ - * // ... - * }), - * }); - * - * const MyService = MyServiceScaffold.finalize({ - * ...myProcedures, - * }); - * ``` - * - * @param procedures - The procedures for this service. - */ - procedures(procedures) { - return procedures; - } - /** - * Finalizes the scaffold into a {@link ServiceSchema}. This is where you - * provide the service's procedures and get a {@link ServiceSchema} in return. - * - * You can directly define procedures here, or you can define them separately - * with the {@link ServiceScaffold.procedures} method, and then pass them here. - * - * @example - * ``` - * const MyService = MyServiceScaffold.finalize({ - * myRPC: Procedure.rpc({ - * // ... - * }), - * // e.g. 
from the procedures method - * ...myOtherProcedures, - * }); - * ``` - */ - finalize(procedures) { - return createServiceSchema().define( - this.config, - procedures - ); - } -}; - -// router/result.ts -import { Type as Type4 } from "@sinclair/typebox"; -var AnyResultSchema = Type4.Union([ - Type4.Object({ - ok: Type4.Literal(false), - payload: Type4.Object({ - code: Type4.String(), - message: Type4.String(), - extras: Type4.Optional(Type4.Unknown()) - }) - }), - Type4.Object({ - ok: Type4.Literal(true), - payload: Type4.Unknown() - }) -]); -function Ok(payload) { - return { - ok: true, - payload - }; -} -function Err(error) { - return { - ok: false, - payload: error - }; -} - -// router/streams.ts -var ReadableBrokenError = { - code: "READABLE_BROKEN", - message: "Readable was broken before it is fully consumed" -}; -function createPromiseWithResolvers() { - let resolve; - let reject; - const promise = new Promise((res, rej) => { - resolve = res; - reject = rej; - }); - return { - promise, - // @ts-expect-error promise callbacks are sync - resolve, - // @ts-expect-error promise callbacks are sync - reject - }; -} -var ReadableImpl = class { - /** - * Whether the {@link Readable} is closed. - * - * Closed {@link Readable}s are done receiving values, but that doesn't affect - * any other aspect of the {@link Readable} such as it's consumability. - */ - closed = false; - /** - * Whether the {@link Readable} is locked. - * - * @see {@link Readable}'s typedoc to understand locking - */ - locked = false; - /** - * Whether {@link break} was called. - * - * @see {@link break} for more information - */ - broken = false; - /** - * This flag allows us to avoid emitting a {@link ReadableBrokenError} after {@link break} was called - * in cases where the {@link queue} is fully consumed and {@link ReadableImpl} is {@link closed}. This is just an - * ergonomic feature to avoid emitting an error in our iteration when we don't have to. 
- */ - brokenWithValuesLeftToRead = false; - /** - * A list of values that have been pushed to the {@link ReadableImpl} but not yet emitted to the user. - */ - queue = []; - /** - * Used by methods in the class to signal to the iterator that it - * should check for the next value. - */ - next = null; - /** - * Consumes the {@link Readable} and returns an {@link AsyncIterator} that can be used - * to iterate over the values in the {@link Readable}. - */ - [Symbol.asyncIterator]() { - if (this.locked) { - throw new TypeError("Readable is already locked"); - } - this.locked = true; - let didSignalBreak = false; - return { - next: async () => { - if (didSignalBreak) { - return { - done: true, - value: void 0 - }; - } - while (this.queue.length === 0) { - if (this.closed && !this.brokenWithValuesLeftToRead) { - return { - done: true, - value: void 0 - }; - } - if (this.broken) { - didSignalBreak = true; - return { - done: false, - value: Err(ReadableBrokenError) - }; - } - if (!this.next) { - this.next = createPromiseWithResolvers(); - } - await this.next.promise; - this.next = null; - } - const value = this.queue.shift(); - return { done: false, value }; - }, - return: async () => { - this.break(); - return { done: true, value: void 0 }; - } - }; - } - /** - * Collects all the values from the {@link Readable} into an array. - * - * @see {@link Readable}'s typedoc for more information - */ - async collect() { - const array = []; - for await (const value of this) { - array.push(value); - } - return array; - } - /** - * Breaks the {@link Readable} and signals an error to any iterators waiting for the next value. - * - * @see {@link Readable}'s typedoc for more information - */ - break() { - if (this.broken) { - return; - } - this.locked = true; - this.broken = true; - this.brokenWithValuesLeftToRead = this.queue.length > 0; - this.queue.length = 0; - this.next?.resolve(); - } - /** - * Whether the {@link Readable} is readable. 
- * - * @see {@link Readable}'s typedoc for more information - */ - isReadable() { - return !this.locked && !this.broken; - } - /** - * Pushes a value to be read. - */ - _pushValue(value) { - if (this.broken) { - return; - } - if (this.closed) { - throw new Error("Cannot push to closed Readable"); - } - this.queue.push(value); - this.next?.resolve(); - } - /** - * Triggers the close of the {@link Readable}. Make sure to push all remaining - * values before calling this method. - */ - _triggerClose() { - if (this.closed) { - throw new Error("Unexpected closing multiple times"); - } - this.closed = true; - this.next?.resolve(); - } - /** - * @internal meant for use within river, not exposed as a public API - */ - _hasValuesInQueue() { - return this.queue.length > 0; - } - /** - * Whether the {@link Readable} is closed. - */ - isClosed() { - return this.closed; - } -}; -var WritableImpl = class { - /** - * Passed via constructor to pass on calls to {@link write} - */ - writeCb; - /** - * Passed via constructor to pass on calls to {@link close} - */ - closeCb; - /** - * Whether {@link close} was called, and {@link Writable} is not writable anymore. 
- */ - closed = false; - constructor(callbacks) { - this.writeCb = callbacks.writeCb; - this.closeCb = callbacks.closeCb; - } - write(value) { - if (this.closed) { - throw new Error("Cannot write to closed Writable"); - } - this.writeCb(value); - } - isWritable() { - return !this.closed; - } - close(value) { - if (this.closed) { - return; - } - if (value !== void 0) { - this.writeCb(value); - } - this.closed = true; - this.writeCb = () => void 0; - this.closeCb(); - this.closeCb = () => void 0; - } - /** - * @internal meant for use within river, not exposed as a public API - */ - isClosed() { - return this.closed; - } -}; - -// router/procedures.ts -import { Type as Type5 } from "@sinclair/typebox"; -function rpc({ - requestInit, - responseData, - responseError = Type5.Never(), - description, - handler -}) { - return { - ...description ? { description } : {}, - type: "rpc", - requestInit, - responseData, - responseError, - handler - }; -} -function upload({ - requestInit, - requestData, - responseData, - responseError = Type5.Never(), - description, - handler -}) { - return { - type: "upload", - ...description ? { description } : {}, - requestInit, - requestData, - responseData, - responseError, - handler - }; -} -function subscription({ - requestInit, - responseData, - responseError = Type5.Never(), - description, - handler -}) { - return { - type: "subscription", - ...description ? { description } : {}, - requestInit, - responseData, - responseError, - handler - }; -} -function stream({ - requestInit, - requestData, - responseData, - responseError = Type5.Never(), - description, - handler -}) { - return { - type: "stream", - ...description ? 
{ description } : {}, - requestInit, - requestData, - responseData, - responseError, - handler - }; -} -var Procedure = { - rpc, - upload, - subscription, - stream -}; - -// router/server.ts -import { Value } from "@sinclair/typebox/value"; - -// transport/stringifyError.ts -function coerceErrorString(err) { - if (err instanceof Error) { - return err.message || "unknown reason"; - } - return `[coerced to error] ${String(err)}`; -} - -// router/server.ts -var RiverServer = class { - transport; - contextMap; - log; - middlewares; - /** - * We create a tombstones for streams cancelled by the server - * so that we don't hit errors when the client has inflight - * requests it sent before it saw the cancel. - * We track cancelled streams for every client separately, so - * that bad clients don't affect good clients. - */ - serverCancelledStreams; - maxCancelledStreamTombstonesPerSession; - streams; - services; - unregisterTransportListeners; - constructor(transport, services2, handshakeOptions, extendedContext, maxCancelledStreamTombstonesPerSession = 200, middlewares = []) { - const instances = {}; - this.middlewares = middlewares; - this.services = instances; - this.contextMap = /* @__PURE__ */ new Map(); - extendedContext = extendedContext ?? 
{}; - for (const [name, service] of Object.entries(services2)) { - const instance = service.instantiate(extendedContext); - instances[name] = instance; - this.contextMap.set(instance, { - ...extendedContext, - state: instance.state - }); - } - if (handshakeOptions) { - transport.extendHandshake(handshakeOptions); - } - this.transport = transport; - this.streams = /* @__PURE__ */ new Map(); - this.serverCancelledStreams = /* @__PURE__ */ new Map(); - this.maxCancelledStreamTombstonesPerSession = maxCancelledStreamTombstonesPerSession; - this.log = transport.log; - const handleCreatingNewStreams = (message) => { - if (message.to !== this.transport.clientId) { - this.log?.info( - `got msg with destination that isn't this server, ignoring`, - { - clientId: this.transport.clientId, - transportMessage: message - } - ); - return; - } - const streamId = message.streamId; - const stream2 = this.streams.get(streamId); - if (stream2) { - stream2.handleMsg(message); - return; - } - if (this.serverCancelledStreams.get(message.from)?.has(streamId)) { - return; - } - const newStreamProps = this.validateNewProcStream(message); - if (!newStreamProps) { - return; - } - createHandlerSpan( - transport.tracer, - newStreamProps.initialSession, - newStreamProps.procedure.type, - newStreamProps.serviceName, - newStreamProps.procedureName, - newStreamProps.streamId, - newStreamProps.tracingCtx, - (span) => { - this.createNewProcStream(span, newStreamProps); - } - ); - }; - const handleSessionStatus = (evt) => { - if (evt.status !== "closing") return; - const disconnectedClientId = evt.session.to; - this.log?.info( - `got session disconnect from ${disconnectedClientId}, cleaning up streams`, - evt.session.loggingMetadata - ); - for (const stream2 of this.streams.values()) { - if (stream2.from === disconnectedClientId) { - stream2.handleSessionDisconnect(); - } - } - this.serverCancelledStreams.delete(disconnectedClientId); - }; - const handleTransportStatus = (evt) => { - if (evt.status !== 
"closed") return; - this.unregisterTransportListeners(); - }; - this.unregisterTransportListeners = () => { - this.transport.removeEventListener("message", handleCreatingNewStreams); - this.transport.removeEventListener("sessionStatus", handleSessionStatus); - this.transport.removeEventListener( - "transportStatus", - handleTransportStatus - ); - }; - this.transport.addEventListener("message", handleCreatingNewStreams); - this.transport.addEventListener("sessionStatus", handleSessionStatus); - this.transport.addEventListener("transportStatus", handleTransportStatus); - } - createNewProcStream(span, props) { - const { - streamId, - initialSession, - procedureName, - serviceName, - procedure, - sessionMetadata, - serviceContext, - initPayload, - procClosesWithInit, - passInitAsDataForBackwardsCompat - } = props; - const { - to: from, - loggingMetadata, - protocolVersion, - id: sessionId - } = initialSession; - loggingMetadata.telemetry = { - traceId: span.spanContext().traceId, - spanId: span.spanContext().spanId - }; - let cleanClose = true; - const onMessage = (msg) => { - if (msg.from !== from) { - this.log?.error("got stream message from unexpected client", { - ...loggingMetadata, - transportMessage: msg, - tags: ["invariant-violation"] - }); - return; - } - if (isStreamCancelBackwardsCompat(msg.controlFlags, protocolVersion)) { - let cancelResult; - if (Value.Check(CancelResultSchema, msg.payload)) { - cancelResult = msg.payload; - } else { - cancelResult = Err({ - code: CANCEL_CODE, - message: "stream cancelled, client sent invalid payload" - }); - this.log?.warn("got stream cancel without a valid protocol error", { - ...loggingMetadata, - transportMessage: msg, - validationErrors: [ - ...Value.Errors(CancelResultSchema, msg.payload) - ], - tags: ["invalid-request"] - }); - } - if (!reqReadable.isClosed()) { - reqReadable._pushValue(cancelResult); - closeReadable(); - } - resWritable.close(); - return; - } - if (reqReadable.isClosed()) { - 
this.log?.warn("received message after request stream is closed", { - ...loggingMetadata, - transportMessage: msg, - tags: ["invalid-request"] - }); - onServerCancel({ - code: INVALID_REQUEST_CODE, - message: "received message after request stream is closed" - }); - return; - } - if ("requestData" in procedure && Value.Check(procedure.requestData, msg.payload)) { - reqReadable._pushValue(Ok(msg.payload)); - if (isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion)) { - closeReadable(); - } - return; - } - if (Value.Check(ControlMessagePayloadSchema, msg.payload) && isStreamCloseBackwardsCompat(msg.controlFlags, protocolVersion)) { - closeReadable(); - return; - } - let validationErrors; - let errMessage; - if ("requestData" in procedure) { - errMessage = "message in requestData position did not match schema"; - validationErrors = castTypeboxValueErrors( - Value.Errors(procedure.requestData, msg.payload) - ); - } else { - validationErrors = castTypeboxValueErrors( - Value.Errors(ControlMessagePayloadSchema, msg.payload) - ); - errMessage = "message in control payload position did not match schema"; - } - this.log?.warn(errMessage, { - ...loggingMetadata, - transportMessage: msg, - validationErrors: validationErrors.map((error) => ({ - path: error.path, - message: error.message - })), - tags: ["invalid-request"] - }); - onServerCancel({ - code: INVALID_REQUEST_CODE, - message: errMessage, - extras: { - totalErrors: validationErrors.length, - firstValidationErrors: validationErrors.slice(0, 5) - } - }); - }; - const finishedController = new AbortController(); - const procStream = { - from, - streamId, - procedureName, - serviceName, - sessionMetadata, - procedure, - handleMsg: onMessage, - handleSessionDisconnect: () => { - cleanClose = false; - const errPayload = { - code: UNEXPECTED_DISCONNECT_CODE, - message: "client unexpectedly disconnected" - }; - if (!reqReadable.isClosed()) { - reqReadable._pushValue(Err(errPayload)); - closeReadable(); - } - 
resWritable.close(); - } - }; - const sessionScopedSend = this.transport.getSessionBoundSendFn( - from, - sessionId - ); - const cancelStream = (streamId2, payload) => { - this.cancelStream(from, sessionScopedSend, streamId2, payload); - }; - const onServerCancel = (e) => { - recordRiverError(span, e); - if (reqReadable.isClosed() && resWritable.isClosed()) { - return; - } - cleanClose = false; - const result = Err(e); - if (!reqReadable.isClosed()) { - reqReadable._pushValue(result); - closeReadable(); - } - resWritable.close(); - cancelStream(streamId, result); - }; - const cleanup = () => { - finishedController.abort(); - this.streams.delete(streamId); - }; - const procClosesWithResponse = procedure.type === "rpc" || procedure.type === "upload"; - const reqReadable = new ReadableImpl(); - const closeReadable = () => { - reqReadable._triggerClose(); - if (protocolVersion === "v1.1") { - if (!procClosesWithResponse && !resWritable.isClosed()) { - resWritable.close(); - } - } - if (resWritable.isClosed()) { - cleanup(); - } - }; - if (passInitAsDataForBackwardsCompat) { - reqReadable._pushValue(Ok(initPayload)); - } - const resWritable = new WritableImpl({ - writeCb: (response) => { - if (!response.ok) { - recordRiverError(span, response.payload); - } - sessionScopedSend({ - streamId, - controlFlags: procClosesWithResponse ? 
getStreamCloseBackwardsCompat(protocolVersion) : 0, - payload: response - }); - if (procClosesWithResponse) { - resWritable.close(); - } - }, - // close callback - closeCb: () => { - if (!procClosesWithResponse && cleanClose) { - const message = closeStreamMessage(streamId); - message.controlFlags = getStreamCloseBackwardsCompat(protocolVersion); - sessionScopedSend(message); - } - if (protocolVersion === "v1.1") { - if (!reqReadable.isClosed()) { - closeReadable(); - } - } - if (reqReadable.isClosed()) { - cleanup(); - } - } - }); - const onHandlerError = (err, span2) => { - const errorMsg = coerceErrorString(err); - span2.recordException(err instanceof Error ? err : new Error(errorMsg)); - this.log?.error( - `${serviceName}.${procedureName} handler threw an uncaught error`, - { - ...loggingMetadata, - transportMessage: { - procedureName, - serviceName - }, - extras: { - error: errorMsg, - originalException: err - }, - tags: ["uncaught-handler-error"] - } - ); - onServerCancel({ - code: UNCAUGHT_ERROR_CODE, - message: errorMsg - }); - }; - if (procClosesWithInit) { - closeReadable(); - } - const handlerContextWithSpan = { - ...serviceContext, - from, - sessionId, - metadata: sessionMetadata, - span, - cancel: (message) => { - const errRes = { - code: CANCEL_CODE, - message: message ?? 
"cancelled by server procedure handler" - }; - onServerCancel(errRes); - return Err(errRes); - }, - signal: finishedController.signal - }; - const middlewareContext = { - ...serviceContext, - sessionId, - from, - metadata: sessionMetadata, - span, - signal: finishedController.signal, - streamId, - procedureName, - serviceName - }; - const runProcedureHandler = async () => { - switch (procedure.type) { - case "rpc": - try { - const responsePayload = await procedure.handler({ - ctx: handlerContextWithSpan, - reqInit: initPayload - }); - if (resWritable.isClosed()) { - return; - } - resWritable.write(responsePayload); - } catch (err) { - onHandlerError(err, span); - } finally { - span.end(); - } - break; - case "stream": - try { - await procedure.handler({ - ctx: handlerContextWithSpan, - reqInit: initPayload, - reqReadable, - resWritable - }); - } catch (err) { - onHandlerError(err, span); - } finally { - span.end(); - } - break; - case "subscription": - try { - await procedure.handler({ - ctx: handlerContextWithSpan, - reqInit: initPayload, - resWritable - }); - } catch (err) { - onHandlerError(err, span); - } finally { - span.end(); - } - break; - case "upload": - try { - const responsePayload = await procedure.handler({ - ctx: handlerContextWithSpan, - reqInit: initPayload, - reqReadable - }); - if (resWritable.isClosed()) { - return; - } - resWritable.write(responsePayload); - } catch (err) { - onHandlerError(err, span); - } finally { - span.end(); - } - break; - } - }; - this.middlewares.reduceRight( - (next, middleware) => { - return () => { - middleware({ - ctx: middlewareContext, - reqInit: initPayload, - next - }); - }; - }, - () => { - void runProcedureHandler(); - } - )(); - if (!finishedController.signal.aborted) { - this.streams.set(streamId, procStream); - } - } - getContext(service, serviceName) { - const context2 = this.contextMap.get(service); - if (!context2) { - const err = `no context found for ${serviceName}`; - this.log?.error(err, { - clientId: 
this.transport.clientId, - tags: ["invariant-violation"] - }); - throw new Error(err); - } - return context2; - } - validateNewProcStream(initMessage) { - const session = this.transport.sessions.get(initMessage.from); - if (!session) { - this.log?.error(`couldn't find session for ${initMessage.from}`, { - clientId: this.transport.clientId, - transportMessage: initMessage, - tags: ["invariant-violation"] - }); - return null; - } - const sessionScopedSend = this.transport.getSessionBoundSendFn( - initMessage.from, - session.id - ); - const cancelStream = (streamId, payload) => { - this.cancelStream(initMessage.from, sessionScopedSend, streamId, payload); - }; - const sessionMetadata = this.transport.sessionHandshakeMetadata.get( - session.to - ); - if (!sessionMetadata) { - const errMessage = `session doesn't have handshake metadata`; - this.log?.error(errMessage, { - ...session.loggingMetadata, - tags: ["invariant-violation"] - }); - cancelStream( - initMessage.streamId, - Err({ - code: UNCAUGHT_ERROR_CODE, - message: errMessage - }) - ); - return null; - } - if (!isStreamOpen(initMessage.controlFlags)) { - const errMessage = `can't create a new procedure stream from a message that doesn't have the stream open bit set`; - this.log?.warn(errMessage, { - ...session.loggingMetadata, - clientId: this.transport.clientId, - transportMessage: initMessage, - tags: ["invalid-request"] - }); - cancelStream( - initMessage.streamId, - Err({ - code: INVALID_REQUEST_CODE, - message: errMessage - }) - ); - return null; - } - if (!initMessage.serviceName) { - const errMessage = `missing service name in stream open message`; - this.log?.warn(errMessage, { - ...session.loggingMetadata, - transportMessage: initMessage, - tags: ["invalid-request"] - }); - cancelStream( - initMessage.streamId, - Err({ - code: INVALID_REQUEST_CODE, - message: errMessage - }) - ); - return null; - } - if (!initMessage.procedureName) { - const errMessage = `missing procedure name in stream open message`; - 
this.log?.warn(errMessage, { - ...session.loggingMetadata, - transportMessage: initMessage, - tags: ["invalid-request"] - }); - cancelStream( - initMessage.streamId, - Err({ - code: INVALID_REQUEST_CODE, - message: errMessage - }) - ); - return null; - } - if (!(initMessage.serviceName in this.services)) { - const errMessage = `couldn't find service ${initMessage.serviceName}`; - this.log?.warn(errMessage, { - ...session.loggingMetadata, - clientId: this.transport.clientId, - transportMessage: initMessage, - tags: ["invalid-request"] - }); - cancelStream( - initMessage.streamId, - Err({ - code: INVALID_REQUEST_CODE, - message: errMessage - }) - ); - return null; - } - const service = this.services[initMessage.serviceName]; - if (!(initMessage.procedureName in service.procedures)) { - const errMessage = `couldn't find a matching procedure for ${initMessage.serviceName}.${initMessage.procedureName}`; - this.log?.warn(errMessage, { - ...session.loggingMetadata, - transportMessage: initMessage, - tags: ["invalid-request"] - }); - cancelStream( - initMessage.streamId, - Err({ - code: INVALID_REQUEST_CODE, - message: errMessage - }) - ); - return null; - } - const serviceContext = this.getContext(service, initMessage.serviceName); - const procedure = service.procedures[initMessage.procedureName]; - if (!["rpc", "upload", "stream", "subscription"].includes(procedure.type)) { - this.log?.error( - `got request for invalid procedure type ${procedure.type} at ${initMessage.serviceName}.${initMessage.procedureName}`, - { - ...session.loggingMetadata, - transportMessage: initMessage, - tags: ["invariant-violation"] - } - ); - return null; - } - let passInitAsDataForBackwardsCompat = false; - if (session.protocolVersion === "v1.1" && (procedure.type === "upload" || procedure.type === "stream") && Value.Check(procedure.requestData, initMessage.payload) && Value.Check(procedure.requestInit, {})) { - passInitAsDataForBackwardsCompat = true; - } else if 
(!Value.Check(procedure.requestInit, initMessage.payload)) { - const errMessage = `procedure init failed validation`; - this.log?.warn(errMessage, { - ...session.loggingMetadata, - clientId: this.transport.clientId, - transportMessage: initMessage, - tags: ["invalid-request"] - }); - cancelStream( - initMessage.streamId, - Err({ - code: INVALID_REQUEST_CODE, - message: errMessage - }) - ); - return null; - } - return { - initialSession: session, - streamId: initMessage.streamId, - procedureName: initMessage.procedureName, - serviceName: initMessage.serviceName, - tracingCtx: initMessage.tracing, - initPayload: initMessage.payload, - sessionMetadata, - procedure, - serviceContext, - procClosesWithInit: isStreamCloseBackwardsCompat( - initMessage.controlFlags, - session.protocolVersion - ), - passInitAsDataForBackwardsCompat - }; - } - cancelStream(to, sessionScopedSend, streamId, payload) { - let cancelledStreamsInSession = this.serverCancelledStreams.get(to); - if (!cancelledStreamsInSession) { - cancelledStreamsInSession = new LRUSet( - this.maxCancelledStreamTombstonesPerSession - ); - this.serverCancelledStreams.set(to, cancelledStreamsInSession); - } - cancelledStreamsInSession.add(streamId); - const msg = cancelMessage(streamId, payload); - sessionScopedSend(msg); - } - async close() { - this.unregisterTransportListeners(); - for (const serviceName of Object.keys(this.services)) { - const service = this.services[serviceName]; - await service[Symbol.asyncDispose](); - } - } -}; -var LRUSet = class { - items; - maxItems; - constructor(maxItems) { - this.items = /* @__PURE__ */ new Set(); - this.maxItems = maxItems; - } - add(item) { - if (this.items.has(item)) { - this.items.delete(item); - } else if (this.items.size >= this.maxItems) { - const first = this.items.values().next(); - if (!first.done) { - this.items.delete(first.value); - } - } - this.items.add(item); - } - has(item) { - return this.items.has(item); - } -}; -function 
isStreamCancelBackwardsCompat(controlFlags, protocolVersion) { - if (protocolVersion === "v1.1") { - return false; - } - return isStreamCancel(controlFlags); -} -function isStreamCloseBackwardsCompat(controlFlags, protocolVersion) { - if (protocolVersion === "v1.1") { - return isStreamCancel(controlFlags); - } - return isStreamClose(controlFlags); -} -function getStreamCloseBackwardsCompat(protocolVersion) { - if (protocolVersion === "v1.1") { - return 4 /* StreamCancelBit */; - } - return 8 /* StreamClosedBit */; -} -function createServer(transport, services2, providedServerOptions) { - return new RiverServer( - transport, - services2, - providedServerOptions?.handshakeOptions, - providedServerOptions?.extendedContext, - providedServerOptions?.maxCancelledStreamTombstonesPerSession, - providedServerOptions?.middlewares - ); -} - -// router/handshake.ts -function createServerHandshakeOptions(schema, validate) { - return { schema, validate }; -} - -// package.json -var version = "0.212.2"; - -// tracing/index.ts -function createSessionTelemetryInfo(tracer, sessionId, to, from, propagationCtx) { - const parentCtx = propagationCtx ? propagation.extract(context.active(), propagationCtx) : context.active(); - const span = tracer.startSpan( - `river.session`, - { - attributes: { - component: "river", - "river.session.id": sessionId, - "river.session.to": to, - "river.session.from": from - } - }, - parentCtx - ); - const ctx = trace.setSpan(parentCtx, span); - return { span, ctx }; -} -function createConnectionTelemetryInfo(tracer, connection, info) { - const span = tracer.startSpan( - `river.connection`, - { - attributes: { - component: "river", - "river.connection.id": connection.id - }, - links: [{ context: info.span.spanContext() }] - }, - info.ctx - ); - const ctx = trace.setSpan(info.ctx, span); - return { span, ctx }; -} -function createHandlerSpan(tracer, session, kind, serviceName, procedureName, streamId, tracing, fn) { - const ctx = tracing ? 
propagation.extract(context.active(), tracing) : context.active(); - return tracer.startActiveSpan( - `river.server.${serviceName}.${procedureName}`, - { - attributes: { - component: "river", - "river.method.kind": kind, - "river.method.service": serviceName, - "river.method.name": procedureName, - "river.streamId": streamId, - "span.kind": "server" - }, - links: [{ context: session.telemetry.span.spanContext() }], - kind: SpanKind.SERVER - }, - ctx, - fn - ); -} -function recordRiverError(span, error) { - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error.message - }); - span.setAttributes({ - "river.error_code": error.code, - "river.error_message": error.message - }); -} -function getTracer() { - return trace.getTracer("river", version); -} - -// transport/sessionStateMachine/SessionWaitingForHandshake.ts -var SessionWaitingForHandshake = class extends CommonSession { - state = "WaitingForHandshake" /* WaitingForHandshake */; - conn; - listeners; - handshakeTimeout; - constructor(props) { - super(props); - this.conn = props.conn; - this.listeners = props.listeners; - this.handshakeTimeout = setTimeout(() => { - this.listeners.onHandshakeTimeout(); - }, this.options.handshakeTimeoutMs); - this.conn.setDataListener(this.onHandshakeData); - this.conn.setErrorListener(this.listeners.onConnectionErrored); - this.conn.setCloseListener(this.listeners.onConnectionClosed); - } - get loggingMetadata() { - return { - clientId: this.from, - connId: this.conn.id, - ...this.conn.loggingMetadata - }; - } - onHandshakeData = (msg) => { - const parsedMsgRes = this.codec.fromBuffer(msg); - if (!parsedMsgRes.ok) { - this.listeners.onInvalidHandshake( - `could not parse handshake message: ${parsedMsgRes.reason}`, - "MALFORMED_HANDSHAKE" - ); - return; - } - this.listeners.onHandshake(parsedMsgRes.value); - }; - sendHandshake(msg) { - return sendMessage(this.conn, this.codec, msg); - } - _handleStateExit() { - this.conn.removeDataListener(); - 
this.conn.removeErrorListener(); - this.conn.removeCloseListener(); - clearTimeout(this.handshakeTimeout); - this.handshakeTimeout = void 0; - } - _handleClose() { - this.conn.close(); - } -}; - -// transport/sessionStateMachine/SessionHandshaking.ts -var SessionHandshaking = class extends IdentifiedSessionWithGracePeriod { - state = "Handshaking" /* Handshaking */; - conn; - listeners; - handshakeTimeout; - constructor(props) { - super(props); - this.conn = props.conn; - this.listeners = props.listeners; - this.handshakeTimeout = setTimeout(() => { - this.listeners.onHandshakeTimeout(); - }, this.options.handshakeTimeoutMs); - this.conn.setDataListener(this.onHandshakeData); - this.conn.setErrorListener(this.listeners.onConnectionErrored); - this.conn.setCloseListener(this.listeners.onConnectionClosed); - } - get loggingMetadata() { - return { - ...super.loggingMetadata, - ...this.conn.loggingMetadata - }; - } - onHandshakeData = (msg) => { - const parsedMsgRes = this.codec.fromBuffer(msg); - if (!parsedMsgRes.ok) { - this.listeners.onInvalidHandshake( - `could not parse handshake message: ${parsedMsgRes.reason}`, - "MALFORMED_HANDSHAKE" - ); - return; - } - this.listeners.onHandshake(parsedMsgRes.value); - }; - sendHandshake(msg) { - return sendMessage(this.conn, this.codec, msg); - } - _handleStateExit() { - super._handleStateExit(); - this.conn.removeDataListener(); - this.conn.removeErrorListener(); - this.conn.removeCloseListener(); - if (this.handshakeTimeout) { - clearTimeout(this.handshakeTimeout); - this.handshakeTimeout = void 0; - } - } - _handleClose() { - super._handleClose(); - this.conn.close(); - } -}; - -// transport/sessionStateMachine/SessionConnected.ts -var SessionConnected = class extends IdentifiedSession { - state = "Connected" /* Connected */; - conn; - listeners; - heartbeatHandle; - heartbeatMissTimeout; - isActivelyHeartbeating = false; - updateBookkeeping(ack, seq) { - this.sendBuffer = this.sendBuffer.filter((unacked) => unacked.seq 
>= ack); - this.ack = seq + 1; - if (this.heartbeatMissTimeout) { - clearTimeout(this.heartbeatMissTimeout); - } - this.startMissingHeartbeatTimeout(); - } - assertSendOrdering(constructedMsg) { - if (constructedMsg.seq > this.seqSent + 1) { - const msg = `invariant violation: would have sent out of order msg (seq: ${constructedMsg.seq}, expected: ${this.seqSent} + 1)`; - this.log?.error(msg, { - ...this.loggingMetadata, - transportMessage: constructedMsg, - tags: ["invariant-violation"] - }); - throw new Error(msg); - } - } - send(msg) { - const constructedMsg = this.constructMsg(msg); - this.assertSendOrdering(constructedMsg); - this.sendBuffer.push(constructedMsg); - const res = sendMessage(this.conn, this.codec, constructedMsg); - if (!res.ok) { - this.listeners.onMessageSendFailure(constructedMsg, res.reason); - return res; - } - this.seqSent = constructedMsg.seq; - return res; - } - constructor(props) { - super(props); - this.conn = props.conn; - this.listeners = props.listeners; - this.conn.setDataListener(this.onMessageData); - this.conn.setCloseListener(this.listeners.onConnectionClosed); - this.conn.setErrorListener(this.listeners.onConnectionErrored); - } - sendBufferedMessages() { - if (this.sendBuffer.length > 0) { - this.log?.info( - `sending ${this.sendBuffer.length} buffered messages, starting at seq ${this.nextSeq()}`, - this.loggingMetadata - ); - for (const msg of this.sendBuffer) { - this.assertSendOrdering(msg); - const res = sendMessage(this.conn, this.codec, msg); - if (!res.ok) { - this.listeners.onMessageSendFailure(msg, res.reason); - return res; - } - this.seqSent = msg.seq; - } - } - return { ok: true, value: void 0 }; - } - get loggingMetadata() { - return { - ...super.loggingMetadata, - ...this.conn.loggingMetadata - }; - } - startMissingHeartbeatTimeout() { - const maxMisses = this.options.heartbeatsUntilDead; - const missDuration = maxMisses * this.options.heartbeatIntervalMs; - this.heartbeatMissTimeout = setTimeout(() => { - 
this.log?.info( - `closing connection to ${this.to} due to inactivity (missed ${maxMisses} heartbeats which is ${missDuration}ms)`, - this.loggingMetadata - ); - this.telemetry.span.addEvent( - "closing connection due to missing heartbeat" - ); - this.conn.close(); - }, missDuration); - } - startActiveHeartbeat() { - this.isActivelyHeartbeating = true; - this.heartbeatHandle = setInterval(() => { - this.sendHeartbeat(); - }, this.options.heartbeatIntervalMs); - } - sendHeartbeat() { - this.log?.debug("sending heartbeat", this.loggingMetadata); - const heartbeat = { - streamId: "heartbeat", - controlFlags: 1 /* AckBit */, - payload: { - type: "ACK" - } - }; - this.send(heartbeat); - } - onMessageData = (msg) => { - const parsedMsgRes = this.codec.fromBuffer(msg); - if (!parsedMsgRes.ok) { - this.listeners.onInvalidMessage( - `could not parse message: ${parsedMsgRes.reason}` - ); - return; - } - const parsedMsg = parsedMsgRes.value; - if (parsedMsg.seq !== this.ack) { - if (parsedMsg.seq < this.ack) { - this.log?.debug( - `received duplicate msg (got seq: ${parsedMsg.seq}, wanted seq: ${this.ack}), discarding`, - { - ...this.loggingMetadata, - transportMessage: parsedMsg - } - ); - } else { - const reason = `received out-of-order msg, closing connection (got seq: ${parsedMsg.seq}, wanted seq: ${this.ack})`; - this.log?.error(reason, { - ...this.loggingMetadata, - transportMessage: parsedMsg, - tags: ["invariant-violation"] - }); - this.telemetry.span.setStatus({ - code: SpanStatusCode.ERROR, - message: reason - }); - this.conn.close(); - } - return; - } - this.log?.debug(`received msg`, { - ...this.loggingMetadata, - transportMessage: parsedMsg - }); - this.updateBookkeeping(parsedMsg.ack, parsedMsg.seq); - if (!isAck(parsedMsg.controlFlags)) { - this.listeners.onMessage(parsedMsg); - return; - } - this.log?.debug(`discarding msg (ack bit set)`, { - ...this.loggingMetadata, - transportMessage: parsedMsg - }); - if (!this.isActivelyHeartbeating) { - 
this.sendHeartbeat(); - } - }; - _handleStateExit() { - super._handleStateExit(); - this.conn.removeDataListener(); - this.conn.removeCloseListener(); - this.conn.removeErrorListener(); - if (this.heartbeatHandle) { - clearInterval(this.heartbeatHandle); - this.heartbeatHandle = void 0; - } - if (this.heartbeatMissTimeout) { - clearTimeout(this.heartbeatMissTimeout); - this.heartbeatMissTimeout = void 0; - } - } - _handleClose() { - super._handleClose(); - this.conn.close(); - } -}; - -// transport/sessionStateMachine/SessionBackingOff.ts -var SessionBackingOff = class extends IdentifiedSessionWithGracePeriod { - state = "BackingOff" /* BackingOff */; - listeners; - backoffTimeout; - constructor(props) { - super(props); - this.listeners = props.listeners; - this.backoffTimeout = setTimeout(() => { - this.listeners.onBackoffFinished(); - }, props.backoffMs); - } - _handleClose() { - super._handleClose(); - } - _handleStateExit() { - super._handleStateExit(); - if (this.backoffTimeout) { - clearTimeout(this.backoffTimeout); - this.backoffTimeout = void 0; - } - } -}; - -// codec/adapter.ts -import { Value as Value2 } from "@sinclair/typebox/value"; -var CodecMessageAdapter = class { - constructor(codec) { - this.codec = codec; - } - toBuffer(msg) { - try { - return { - ok: true, - value: this.codec.toBuffer(msg) - }; - } catch (e) { - return { - ok: false, - reason: coerceErrorString(e) - }; - } - } - fromBuffer(buf) { - try { - const parsedMsg = this.codec.fromBuffer(buf); - if (!Value2.Check(OpaqueTransportMessageSchema, parsedMsg)) { - return { - ok: false, - reason: "transport message schema mismatch" - }; - } - return { - ok: true, - value: parsedMsg - }; - } catch (e) { - return { - ok: false, - reason: coerceErrorString(e) - }; - } - } -}; - -// transport/sessionStateMachine/transitions.ts -function inheritSharedSession(session) { - return { - id: session.id, - from: session.from, - to: session.to, - seq: session.seq, - ack: session.ack, - seqSent: 
session.seqSent, - sendBuffer: session.sendBuffer, - telemetry: session.telemetry, - options: session.options, - log: session.log, - tracer: session.tracer, - protocolVersion: session.protocolVersion, - codec: session.codec - }; -} -function inheritSharedSessionWithGrace(session) { - return { - ...inheritSharedSession(session), - graceExpiryTime: session.graceExpiryTime - }; -} -var SessionStateGraph = { - entrypoints: { - NoConnection: (to, from, listeners, options, protocolVersion, tracer, log) => { - const id = `session-${generateId()}`; - const telemetry = createSessionTelemetryInfo(tracer, id, to, from); - const sendBuffer = []; - const session = new SessionNoConnection({ - listeners, - id, - from, - to, - seq: 0, - ack: 0, - seqSent: 0, - graceExpiryTime: Date.now() + options.sessionDisconnectGraceMs, - sendBuffer, - telemetry, - options, - protocolVersion, - tracer, - log, - codec: new CodecMessageAdapter(options.codec) - }); - session.log?.info(`session ${session.id} created in NoConnection state`, { - ...session.loggingMetadata, - tags: ["state-transition"] - }); - return session; - }, - WaitingForHandshake: (from, conn, listeners, options, tracer, log) => { - const session = new SessionWaitingForHandshake({ - conn, - listeners, - from, - options, - tracer, - log, - codec: new CodecMessageAdapter(options.codec) - }); - session.log?.info(`session created in WaitingForHandshake state`, { - ...session.loggingMetadata, - tags: ["state-transition"] - }); - return session; - } - }, - // All of the transitions 'move'/'consume' the old session and return a new one. - // After a session is transitioned, any usage of the old session will throw. 
- transition: { - // happy path transitions - NoConnectionToBackingOff: (oldSession, backoffMs, listeners) => { - const carriedState = inheritSharedSessionWithGrace(oldSession); - oldSession._handleStateExit(); - const session = new SessionBackingOff({ - backoffMs, - listeners, - ...carriedState - }); - session.log?.info( - `session ${session.id} transition from NoConnection to BackingOff`, - { - ...session.loggingMetadata, - tags: ["state-transition"] - } - ); - return session; - }, - BackingOffToConnecting: (oldSession, connPromise, listeners) => { - const carriedState = inheritSharedSessionWithGrace(oldSession); - oldSession._handleStateExit(); - const session = new SessionConnecting({ - connPromise, - listeners, - ...carriedState - }); - session.log?.info( - `session ${session.id} transition from BackingOff to Connecting`, - { - ...session.loggingMetadata, - tags: ["state-transition"] - } - ); - return session; - }, - ConnectingToHandshaking: (oldSession, conn, listeners) => { - const carriedState = inheritSharedSessionWithGrace(oldSession); - oldSession._handleStateExit(); - const session = new SessionHandshaking({ - conn, - listeners, - ...carriedState - }); - conn.telemetry = createConnectionTelemetryInfo( - session.tracer, - conn, - session.telemetry - ); - session.log?.info( - `session ${session.id} transition from Connecting to Handshaking`, - { - ...session.loggingMetadata, - tags: ["state-transition"] - } - ); - return session; - }, - HandshakingToConnected: (oldSession, listeners) => { - const carriedState = inheritSharedSession(oldSession); - const conn = oldSession.conn; - oldSession._handleStateExit(); - const session = new SessionConnected({ - conn, - listeners, - ...carriedState - }); - session.startMissingHeartbeatTimeout(); - session.log?.info( - `session ${session.id} transition from Handshaking to Connected`, - { - ...session.loggingMetadata, - tags: ["state-transition"] - } - ); - return session; - }, - WaitingForHandshakeToConnected: 
(pendingSession, oldSession, sessionId, to, propagationCtx, listeners, protocolVersion) => { - const conn = pendingSession.conn; - const { from, options } = pendingSession; - const carriedState = oldSession ? ( - // old session exists, inherit state - inheritSharedSession(oldSession) - ) : ( - // old session does not exist, create new state - { - id: sessionId, - from, - to, - seq: 0, - ack: 0, - seqSent: 0, - sendBuffer: [], - telemetry: createSessionTelemetryInfo( - pendingSession.tracer, - sessionId, - to, - from, - propagationCtx - ), - options, - tracer: pendingSession.tracer, - log: pendingSession.log, - protocolVersion, - codec: new CodecMessageAdapter(options.codec) - } - ); - pendingSession._handleStateExit(); - oldSession?._handleStateExit(); - const session = new SessionConnected({ - conn, - listeners, - ...carriedState - }); - session.startMissingHeartbeatTimeout(); - conn.telemetry = createConnectionTelemetryInfo( - session.tracer, - conn, - session.telemetry - ); - session.log?.info( - `session ${session.id} transition from WaitingForHandshake to Connected`, - { - ...session.loggingMetadata, - tags: ["state-transition"] - } - ); - return session; - }, - // disconnect paths - BackingOffToNoConnection: (oldSession, listeners) => { - const carriedState = inheritSharedSessionWithGrace(oldSession); - oldSession._handleStateExit(); - const session = new SessionNoConnection({ - listeners, - ...carriedState - }); - session.log?.info( - `session ${session.id} transition from BackingOff to NoConnection`, - { - ...session.loggingMetadata, - tags: ["state-transition"] - } - ); - return session; - }, - ConnectingToNoConnection: (oldSession, listeners) => { - const carriedState = inheritSharedSessionWithGrace(oldSession); - oldSession.bestEffortClose(); - oldSession._handleStateExit(); - const session = new SessionNoConnection({ - listeners, - ...carriedState - }); - session.log?.info( - `session ${session.id} transition from Connecting to NoConnection`, - { - 
...session.loggingMetadata, - tags: ["state-transition"] - } - ); - return session; - }, - HandshakingToNoConnection: (oldSession, listeners) => { - const carriedState = inheritSharedSessionWithGrace(oldSession); - oldSession.conn.close(); - oldSession._handleStateExit(); - const session = new SessionNoConnection({ - listeners, - ...carriedState - }); - session.log?.info( - `session ${session.id} transition from Handshaking to NoConnection`, - { - ...session.loggingMetadata, - tags: ["state-transition"] - } - ); - return session; - }, - ConnectedToNoConnection: (oldSession, listeners) => { - const carriedState = inheritSharedSession(oldSession); - const graceExpiryTime = Date.now() + oldSession.options.sessionDisconnectGraceMs; - oldSession.conn.close(); - oldSession._handleStateExit(); - const session = new SessionNoConnection({ - listeners, - graceExpiryTime, - ...carriedState - }); - session.log?.info( - `session ${session.id} transition from Connected to NoConnection`, - { - ...session.loggingMetadata, - tags: ["state-transition"] - } - ); - return session; - } - } -}; -var transitions = SessionStateGraph.transition; -var ClientSessionStateGraph = { - entrypoint: SessionStateGraph.entrypoints.NoConnection, - transition: { - // happy paths - // NoConnection -> BackingOff: attempt to connect - NoConnectionToBackingOff: transitions.NoConnectionToBackingOff, - // BackingOff -> Connecting: backoff period elapsed, start connection - BackingOffToConnecting: transitions.BackingOffToConnecting, - // Connecting -> Handshaking: connection established, start handshake - ConnectingToHandshaking: transitions.ConnectingToHandshaking, - // Handshaking -> Connected: handshake complete, session ready - HandshakingToConnected: transitions.HandshakingToConnected, - // disconnect paths - // BackingOff -> NoConnection: unused - BackingOffToNoConnection: transitions.BackingOffToNoConnection, - // Connecting -> NoConnection: connection failed or connection timeout - 
ConnectingToNoConnection: transitions.ConnectingToNoConnection, - // Handshaking -> NoConnection: connection closed or handshake timeout - HandshakingToNoConnection: transitions.HandshakingToNoConnection, - // Connected -> NoConnection: connection closed - ConnectedToNoConnection: transitions.ConnectedToNoConnection - // destroy/close paths - // NoConnection -> x: grace period elapsed - // BackingOff -> x: grace period elapsed - // Connecting -> x: grace period elapsed - // Handshaking -> x: grace period elapsed or invalid handshake message or handshake rejection - // Connected -> x: grace period elapsed or invalid message - } -}; -var ServerSessionStateGraph = { - entrypoint: SessionStateGraph.entrypoints.WaitingForHandshake, - transition: { - // happy paths - // WaitingForHandshake -> Connected: handshake complete, session ready - WaitingForHandshakeToConnected: transitions.WaitingForHandshakeToConnected, - // disconnect paths - // Connected -> NoConnection: connection closed - ConnectedToNoConnection: transitions.ConnectedToNoConnection - // destroy/close paths - // WaitingForHandshake -> x: handshake timeout elapsed or invalid handshake message or handshake rejection or connection closed - } -}; - -// transport/transport.ts -var Transport = class { - /** - * The status of the transport. - */ - status; - /** - * The client ID of this transport. - */ - clientId; - /** - * The event dispatcher for handling events of type EventTypes. - */ - eventDispatcher; - /** - * The options for this transport. - */ - options; - log; - tracer; - sessions; - /** - * Creates a new Transport instance. - * @param codec The codec used to encode and decode messages. - * @param clientId The client ID of this transport. 
- */ - constructor(clientId, providedOptions) { - this.options = { ...defaultTransportOptions, ...providedOptions }; - this.eventDispatcher = new EventDispatcher(); - this.clientId = clientId; - this.status = "open"; - this.sessions = /* @__PURE__ */ new Map(); - this.tracer = getTracer(); - } - bindLogger(fn, level) { - if (typeof fn === "function") { - this.log = createLogProxy(new BaseLogger(fn, level)); - return; - } - this.log = createLogProxy(fn); - } - /** - * Called when a message is received by this transport. - * You generally shouldn't need to override this in downstream transport implementations. - * @param message The received message. - */ - handleMsg(message) { - if (this.getStatus() !== "open") return; - this.eventDispatcher.dispatchEvent("message", message); - } - /** - * Adds a listener to this transport. - * @param the type of event to listen for - * @param handler The message handler to add. - */ - addEventListener(type, handler) { - this.eventDispatcher.addEventListener(type, handler); - } - /** - * Removes a listener from this transport. - * @param the type of event to un-listen on - * @param handler The message handler to remove. - */ - removeEventListener(type, handler) { - this.eventDispatcher.removeEventListener(type, handler); - } - protocolError(message) { - this.eventDispatcher.dispatchEvent("protocolError", message); - } - /** - * Default close implementation for transports. You should override this in the downstream - * implementation if you need to do any additional cleanup and call super.close() at the end. - * Closes the transport. Any messages sent while the transport is closed will be silently discarded. 
- */ - close() { - this.status = "closed"; - const sessions = Array.from(this.sessions.values()); - for (const session of sessions) { - this.deleteSession(session); - } - this.eventDispatcher.dispatchEvent("transportStatus", { - status: this.status - }); - this.eventDispatcher.removeAllListeners(); - this.log?.info(`manually closed transport`, { clientId: this.clientId }); - } - getStatus() { - return this.status; - } - // state transitions - createSession(session) { - const activeSession = this.sessions.get(session.to); - if (activeSession) { - const msg = `attempt to create session for ${session.to} but active session (${activeSession.id}) already exists`; - this.log?.error(msg, { - ...session.loggingMetadata, - tags: ["invariant-violation"] - }); - throw new Error(msg); - } - this.sessions.set(session.to, session); - this.eventDispatcher.dispatchEvent("sessionStatus", { - status: "created", - session - }); - this.eventDispatcher.dispatchEvent("sessionTransition", { - state: session.state, - id: session.id - }); - } - updateSession(session) { - const activeSession = this.sessions.get(session.to); - if (!activeSession) { - const msg = `attempt to transition session for ${session.to} but no active session exists`; - this.log?.error(msg, { - ...session.loggingMetadata, - tags: ["invariant-violation"] - }); - throw new Error(msg); - } - if (activeSession.id !== session.id) { - const msg = `attempt to transition active session for ${session.to} but active session (${activeSession.id}) is different from handle (${session.id})`; - this.log?.error(msg, { - ...session.loggingMetadata, - tags: ["invariant-violation"] - }); - throw new Error(msg); - } - this.sessions.set(session.to, session); - this.eventDispatcher.dispatchEvent("sessionTransition", { - state: session.state, - id: session.id - }); - } - deleteSession(session, options) { - if (session._isConsumed) return; - const loggingMetadata = session.loggingMetadata; - if (loggingMetadata.tags && options?.unhealthy) { - 
loggingMetadata.tags.push("unhealthy-session"); - } - session.log?.info(`closing session ${session.id}`, loggingMetadata); - this.eventDispatcher.dispatchEvent("sessionStatus", { - status: "closing", - session - }); - const to = session.to; - session.close(); - this.sessions.delete(to); - this.eventDispatcher.dispatchEvent("sessionStatus", { - status: "closed", - session: { id: session.id, to } - }); - } - // common listeners - onSessionGracePeriodElapsed(session) { - this.log?.info( - `session to ${session.to} grace period elapsed, closing`, - session.loggingMetadata - ); - this.deleteSession(session); - } - onConnectingFailed(session) { - const noConnectionSession = SessionStateGraph.transition.ConnectingToNoConnection(session, { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - } - }); - this.updateSession(noConnectionSession); - return noConnectionSession; - } - onConnClosed(session) { - let noConnectionSession; - if (session.state === "Handshaking" /* Handshaking */) { - noConnectionSession = SessionStateGraph.transition.HandshakingToNoConnection(session, { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - } - }); - } else { - noConnectionSession = SessionStateGraph.transition.ConnectedToNoConnection(session, { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - } - }); - } - this.updateSession(noConnectionSession); - return noConnectionSession; - } - /** - * Gets a send closure scoped to a specific session. Sending using the returned - * closure after the session has transitioned to a different state will be a noop. - * - * Session objects themselves can become stale as they transition between - * states. As stale sessions cannot be used again (and will throw), holding - * onto a session object is not recommended. 
- */ - getSessionBoundSendFn(to, sessionId) { - if (this.getStatus() !== "open") { - throw new Error("cannot get a bound send function on a closed transport"); - } - return (msg) => { - const session = this.sessions.get(to); - if (!session) { - throw new Error( - `session scope for ${sessionId} has ended (close), can't send` - ); - } - const sameSession = session.id === sessionId; - if (!sameSession || session._isConsumed) { - throw new Error( - `session scope for ${sessionId} has ended (transition), can't send` - ); - } - const res = session.send(msg); - if (!res.ok) { - throw new Error(res.reason); - } - return res.value; - }; - } -}; - -// transport/server.ts -import { Value as Value3 } from "@sinclair/typebox/value"; -var ServerTransport = class extends Transport { - /** - * The options for this transport. - */ - options; - /** - * Optional handshake options for the server. - */ - handshakeExtensions; - /** - * A map of session handshake data for each session. - */ - sessionHandshakeMetadata = /* @__PURE__ */ new Map(); - sessions = /* @__PURE__ */ new Map(); - pendingSessions = /* @__PURE__ */ new Set(); - constructor(clientId, providedOptions) { - super(clientId, providedOptions); - this.sessions = /* @__PURE__ */ new Map(); - this.options = { - ...defaultServerTransportOptions, - ...providedOptions - }; - this.log?.info(`initiated server transport`, { - clientId: this.clientId, - protocolVersion: currentProtocolVersion - }); - } - extendHandshake(options) { - this.handshakeExtensions = options; - } - deletePendingSession(pendingSession) { - pendingSession.close(); - this.pendingSessions.delete(pendingSession); - } - deleteSession(session, options) { - this.sessionHandshakeMetadata.delete(session.to); - super.deleteSession(session, options); - } - handleConnection(conn) { - if (this.getStatus() !== "open") return; - this.log?.info(`new incoming connection`, { - ...conn.loggingMetadata, - clientId: this.clientId - }); - let receivedHandshake = false; - const 
pendingSession = ServerSessionStateGraph.entrypoint( - this.clientId, - conn, - { - onConnectionClosed: () => { - this.log?.warn( - `connection from unknown closed before handshake finished`, - pendingSession.loggingMetadata - ); - this.deletePendingSession(pendingSession); - }, - onConnectionErrored: (err) => { - const errorString = coerceErrorString(err); - this.log?.warn( - `connection from unknown errored before handshake finished: ${errorString}`, - pendingSession.loggingMetadata - ); - this.deletePendingSession(pendingSession); - }, - onHandshakeTimeout: () => { - this.log?.warn( - `connection from unknown timed out before handshake finished`, - pendingSession.loggingMetadata - ); - this.deletePendingSession(pendingSession); - }, - onHandshake: (msg) => { - if (receivedHandshake) { - this.log?.error( - `received multiple handshake messages from pending session`, - { - ...pendingSession.loggingMetadata, - connectedTo: msg.from, - transportMessage: msg - } - ); - this.deletePendingSession(pendingSession); - return; - } - receivedHandshake = true; - void this.onHandshakeRequest(pendingSession, msg); - }, - onInvalidHandshake: (reason, code) => { - this.log?.error( - `invalid handshake: ${reason}`, - pendingSession.loggingMetadata - ); - this.deletePendingSession(pendingSession); - this.protocolError({ - type: ProtocolError.HandshakeFailed, - code, - message: reason - }); - } - }, - this.options, - this.tracer, - this.log - ); - this.pendingSessions.add(pendingSession); - } - rejectHandshakeRequest(session, to, reason, code, metadata) { - session.conn.telemetry?.span.setStatus({ - code: SpanStatusCode.ERROR, - message: reason - }); - this.log?.warn(reason, metadata); - const responseMsg = handshakeResponseMessage({ - from: this.clientId, - to, - status: { - ok: false, - code, - reason - } - }); - const res = session.sendHandshake(responseMsg); - if (!res.ok) { - this.log?.error(`failed to send handshake response: ${res.reason}`, { - ...session.loggingMetadata, - 
transportMessage: responseMsg - }); - this.protocolError({ - type: ProtocolError.MessageSendFailure, - message: res.reason - }); - this.deletePendingSession(session); - return; - } - this.protocolError({ - type: ProtocolError.HandshakeFailed, - code, - message: reason - }); - this.deletePendingSession(session); - } - async onHandshakeRequest(session, msg) { - if (!Value3.Check(ControlMessageHandshakeRequestSchema, msg.payload)) { - this.rejectHandshakeRequest( - session, - msg.from, - "received invalid handshake request", - "MALFORMED_HANDSHAKE", - { - ...session.loggingMetadata, - transportMessage: msg, - connectedTo: msg.from, - validationErrors: [ - ...Value3.Errors(ControlMessageHandshakeRequestSchema, msg.payload) - ] - } - ); - return; - } - const gotVersion = msg.payload.protocolVersion; - if (!isAcceptedProtocolVersion(gotVersion)) { - this.rejectHandshakeRequest( - session, - msg.from, - `expected protocol version oneof [${acceptedProtocolVersions.toString()}], got ${gotVersion}`, - "PROTOCOL_VERSION_MISMATCH", - { - ...session.loggingMetadata, - connectedTo: msg.from, - transportMessage: msg - } - ); - return; - } - let parsedMetadata = {}; - if (this.handshakeExtensions) { - if (!Value3.Check(this.handshakeExtensions.schema, msg.payload.metadata)) { - this.rejectHandshakeRequest( - session, - msg.from, - "received malformed handshake metadata", - "MALFORMED_HANDSHAKE_META", - { - ...session.loggingMetadata, - connectedTo: msg.from, - validationErrors: [ - ...Value3.Errors( - this.handshakeExtensions.schema, - msg.payload.metadata - ) - ] - } - ); - return; - } - const previousParsedMetadata = this.sessionHandshakeMetadata.get( - msg.from - ); - const parsedMetadataOrFailureCode = await this.handshakeExtensions.validate( - msg.payload.metadata, - previousParsedMetadata - ); - if (session._isConsumed) { - return; - } - if (Value3.Check( - HandshakeErrorCustomHandlerFatalResponseCodes, - parsedMetadataOrFailureCode - )) { - this.rejectHandshakeRequest( - 
session, - msg.from, - "rejected by handshake handler", - parsedMetadataOrFailureCode, - { - ...session.loggingMetadata, - connectedTo: msg.from, - clientId: this.clientId - } - ); - return; - } - parsedMetadata = parsedMetadataOrFailureCode; - } - let connectCase = "new session"; - const clientNextExpectedSeq = msg.payload.expectedSessionState.nextExpectedSeq; - const clientNextSentSeq = msg.payload.expectedSessionState.nextSentSeq; - let oldSession = this.sessions.get(msg.from); - if (this.options.enableTransparentSessionReconnects && oldSession && oldSession.id === msg.payload.sessionId) { - connectCase = "transparent reconnection"; - const ourNextSeq = oldSession.nextSeq(); - const ourAck = oldSession.ack; - if (clientNextSentSeq > ourAck) { - this.rejectHandshakeRequest( - session, - msg.from, - `client is in the future: server wanted next message to be ${ourAck} but client would have sent ${clientNextSentSeq}`, - "SESSION_STATE_MISMATCH", - { - ...session.loggingMetadata, - connectedTo: msg.from, - transportMessage: msg - } - ); - return; - } - if (ourNextSeq > clientNextExpectedSeq) { - this.rejectHandshakeRequest( - session, - msg.from, - `server is in the future: client wanted next message to be ${clientNextExpectedSeq} but server would have sent ${ourNextSeq}`, - "SESSION_STATE_MISMATCH", - { - ...session.loggingMetadata, - connectedTo: msg.from, - transportMessage: msg - } - ); - return; - } - if (oldSession.state !== "NoConnection" /* NoConnection */) { - const noConnectionSession = ServerSessionStateGraph.transition.ConnectedToNoConnection( - oldSession, - { - onSessionGracePeriodElapsed: () => { - this.onSessionGracePeriodElapsed(noConnectionSession); - } - } - ); - oldSession = noConnectionSession; - this.updateSession(oldSession); - } - } else if (oldSession) { - connectCase = "hard reconnection"; - this.log?.info( - `client is reconnecting to a new session (${msg.payload.sessionId}) with an old session (${oldSession.id}) already existing, closing 
old session`, - { - ...session.loggingMetadata, - connectedTo: msg.from, - sessionId: msg.payload.sessionId - } - ); - this.deleteSession(oldSession); - oldSession = void 0; - } - if (!oldSession && (clientNextSentSeq > 0 || clientNextExpectedSeq > 0)) { - connectCase = "unknown session"; - const rejectionMessage = this.options.enableTransparentSessionReconnects ? `client is trying to reconnect to a session the server don't know about: ${msg.payload.sessionId}` : `client is attempting a transparent reconnect to a session but the server does not support it: ${msg.payload.sessionId}`; - this.rejectHandshakeRequest( - session, - msg.from, - rejectionMessage, - "SESSION_STATE_MISMATCH", - { - ...session.loggingMetadata, - connectedTo: msg.from, - transportMessage: msg - } - ); - return; - } - const sessionId = msg.payload.sessionId; - this.log?.info( - `handshake from ${msg.from} ok (${connectCase}), responding with handshake success`, - { - ...session.loggingMetadata, - connectedTo: msg.from - } - ); - const responseMsg = handshakeResponseMessage({ - from: this.clientId, - to: msg.from, - status: { - ok: true, - sessionId - } - }); - const res = session.sendHandshake(responseMsg); - if (!res.ok) { - this.log?.error(`failed to send handshake response: ${res.reason}`, { - ...session.loggingMetadata, - transportMessage: responseMsg - }); - this.protocolError({ - type: ProtocolError.MessageSendFailure, - message: res.reason - }); - this.deletePendingSession(session); - return; - } - this.pendingSessions.delete(session); - const connectedSession = ServerSessionStateGraph.transition.WaitingForHandshakeToConnected( - session, - // by this point oldSession is either no connection or we dont have an old session - oldSession, - sessionId, - msg.from, - msg.tracing, - { - onConnectionErrored: (err) => { - const errStr = coerceErrorString(err); - this.log?.warn( - `connection to ${connectedSession.to} errored: ${errStr}`, - connectedSession.loggingMetadata - ); - }, - 
onConnectionClosed: () => { - this.log?.info( - `connection to ${connectedSession.to} closed`, - connectedSession.loggingMetadata - ); - this.onConnClosed(connectedSession); - }, - onMessage: (msg2) => { - this.handleMsg(msg2); - }, - onInvalidMessage: (reason) => { - this.log?.error(`invalid message: ${reason}`, { - ...connectedSession.loggingMetadata, - transportMessage: msg - }); - this.protocolError({ - type: ProtocolError.InvalidMessage, - message: reason - }); - this.deleteSession(connectedSession, { unhealthy: true }); - }, - onMessageSendFailure: (msg2, reason) => { - this.log?.error(`failed to send message: ${reason}`, { - ...connectedSession.loggingMetadata, - transportMessage: msg2 - }); - this.protocolError({ - type: ProtocolError.MessageSendFailure, - message: reason - }); - this.deleteSession(connectedSession, { unhealthy: true }); - } - }, - gotVersion - ); - const bufferSendRes = connectedSession.sendBufferedMessages(); - if (!bufferSendRes.ok) { - return; - } - this.sessionHandshakeMetadata.set(connectedSession.to, parsedMetadata); - if (oldSession) { - this.updateSession(connectedSession); - } else { - this.createSession(connectedSession); - } - connectedSession.startActiveHeartbeat(); - } -}; - -// transport/impls/ws/server.ts -function cleanHeaders(headers) { - const cleanedHeaders = {}; - for (const [key, value] of Object.entries(headers)) { - if (!key.startsWith("sec-") && value) { - const cleanedValue = Array.isArray(value) ? 
value[0] : value; - cleanedHeaders[key] = cleanedValue; - } - } - return cleanedHeaders; -} -var WebSocketServerTransport = class extends ServerTransport { - wss; - constructor(wss, clientId, providedOptions) { - super(clientId, providedOptions); - this.wss = wss; - this.wss.on("connection", this.connectionHandler); - } - connectionHandler = (ws, req) => { - const conn = new WebSocketConnection(ws, { - headers: cleanHeaders(req.headersDistinct) - }); - this.handleConnection(conn); - }; - close() { - super.close(); - this.wss.off("connection", this.connectionHandler); - } -}; - -// python-client/tests/test_server_handshake.ts -import { Type as Type6 } from "@sinclair/typebox"; -var ServiceSchema = createServiceSchema(); -var HandshakeTestServiceSchema = ServiceSchema.define({ - echo: Procedure.rpc({ - requestInit: Type6.Object({ msg: Type6.String() }), - responseData: Type6.Object({ response: Type6.String() }), - responseError: Type6.Never(), - async handler({ reqInit }) { - return Ok({ response: reqInit.msg }); - } - }) -}); -var services = { - test: HandshakeTestServiceSchema -}; -var handshakeSchema = Type6.Object({ token: Type6.String() }); -async function main() { - const httpServer = http.createServer(); - const port = await new Promise((resolve, reject) => { - httpServer.listen(0, "127.0.0.1", () => { - const addr = httpServer.address(); - if (typeof addr === "object" && addr) resolve(addr.port); - else reject(new Error("couldn't get port")); - }); - }); - const wss = new WebSocketServer({ server: httpServer }); - const serverTransport = new WebSocketServerTransport( - wss, - "HANDSHAKE_SERVER" - ); - const _server = createServer(serverTransport, services, { - handshakeOptions: createServerHandshakeOptions( - handshakeSchema, - (metadata) => { - if (metadata.token !== "valid-token") { - return "REJECTED_BY_CUSTOM_HANDLER"; - } - return {}; - } - ) - }); - process.stdout.write(`RIVER_PORT=${port} -`); - process.on("SIGTERM", () => { - void 
_server.close().then(() => { - httpServer.close(); - process.exit(0); - }); - }); - process.on("SIGINT", () => { - void _server.close().then(() => { - httpServer.close(); - process.exit(0); - }); - }); -} -main().catch((err) => { - console.error("Failed to start handshake test server:", err); - process.exit(1); -}); From 3443957e2338ed946f052c104f8dbd995289512b Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 19:24:10 -0800 Subject: [PATCH 13/29] fixes --- python-client/river/session.py | 6 +- python-client/river/streams.py | 2 +- python-client/river/transport.py | 21 +++- python-client/tests/test_e2e.py | 167 ++++++++++++++++++++++++++++++- 4 files changed, 184 insertions(+), 12 deletions(-) diff --git a/python-client/river/session.py b/python-client/river/session.py index 7a363b7d..507b6dde 100644 --- a/python-client/river/session.py +++ b/python-client/river/session.py @@ -141,10 +141,8 @@ def send(self, partial: PartialTransportMessage) -> tuple[bool, str]: if self.state == SessionState.CONNECTED and self._ws is not None: ok, result = self._send_over_wire(msg) if not ok: - # Roll back: remove the unsendable message from the buffer - # and restore seq so subsequent messages don't have a gap. - self.send_buffer = [m for m in self.send_buffer if m.id != msg.id] - self.seq = msg.seq # restore to the seq we consumed + # Send failure is fatal — the caller (transport) + # is expected to destroy the session. 
return False, result return True, msg.id diff --git a/python-client/river/streams.py b/python-client/river/streams.py index 9ae18736..ebe8c6b2 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -214,7 +214,7 @@ def close(self, value: T | None = None) -> None: if value is not None: self._write_cb(value) self._closed = True - # Nullify callbacks after invocation to prevent reuse (matches TS) + # Nullify callbacks after invocation to prevent reuse self._write_cb = lambda _: None # type: ignore[assignment] if self._close_cb: self._close_cb() diff --git a/python-client/river/transport.py b/python-client/river/transport.py index f17a06c0..1c567659 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -280,9 +280,14 @@ async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: hs_msg = session.create_handshake_request(metadata=self._handshake_metadata) ok, buf = self._codec_adapter.to_buffer(hs_msg) if not ok: + # Handshake send failure is fatal — destroy session logger.error("Failed to encode handshake: %s", buf) await ws.close() - self._on_connection_failed(to) + self._events.dispatch( + "protocolError", + {"type": "message_send_failure", "message": buf}, + ) + self._delete_session(to) return await ws.send(buf) @@ -303,9 +308,10 @@ async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: ok, result = self._codec_adapter.from_buffer(response_bytes) if not ok: + # Invalid handshake response is fatal logger.error("Failed to decode handshake response: %s", result) await ws.close() - self._on_connection_failed(to) + self._delete_session(to) return response_msg: TransportMessage = result # type: ignore[assignment] @@ -313,9 +319,10 @@ async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: # Validate handshake response if not isinstance(payload, dict) or payload.get("type") != "HANDSHAKE_RESP": + # Invalid handshake schema is fatal logger.error("Invalid 
handshake response payload") await ws.close() - self._on_connection_failed(to) + self._delete_session(to) return status = payload.get("status", {}) @@ -411,10 +418,12 @@ def _on_message_data(self, session: Session, raw: bytes, to: str) -> None: """Handle raw bytes received from the WebSocket.""" ok, result = self._codec_adapter.from_buffer(raw) if not ok: + # Invalid message is fatal — destroy the session self._events.dispatch( "protocolError", {"type": "invalid_message", "message": result}, ) + self._delete_session(to) return msg: TransportMessage = result # type: ignore[assignment] @@ -506,6 +515,12 @@ def _send(msg: PartialTransportMessage) -> str: ok, result = session.send(msg) if not ok: + # Send failure is fatal — destroy session + self._events.dispatch( + "protocolError", + {"type": "message_send_failure", "message": result}, + ) + self._delete_session(to) raise RuntimeError(f"Send failed: {result}") return result diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index 81272a7d..dd09677e 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -1405,12 +1405,12 @@ async def test_finalize_after_explicit_close(self, server_url: str): class TestProtocolConformance: - """Tests verifying Python client matches TS protocol behavior.""" + """Tests verifying protocol-level conformance.""" def test_handshake_stream_id_is_random(self): """Handshake streamId should be a random ID, not a fixed string. - TS uses generateId() for handshake streamId; Python must match. + The protocol requires a random streamId for handshakes. """ from river.codec import CodecMessageAdapter, NaiveJsonCodec from river.session import Session @@ -1446,7 +1446,7 @@ def test_readable_push_after_break_is_noop(self): def test_writable_close_nullifies_callbacks(self): """After close(), write/close callbacks should not be invocable. - TS nullifies callbacks after close to prevent reuse. 
+ Callbacks should be nullified after close to prevent reuse. """ from river.streams import Writable @@ -1467,7 +1467,7 @@ def test_writable_close_nullifies_callbacks(self): assert close_count[0] == 1 def test_heartbeat_stream_id_is_fixed(self): - """Heartbeat streamId should be 'heartbeat' (matching TS).""" + """Heartbeat streamId should be the fixed string 'heartbeat'.""" from river.types import heartbeat_message hb = heartbeat_message() @@ -1501,3 +1501,162 @@ def test_handshake_payload_omits_metadata_when_none(self): metadata=None, ) assert "metadata" not in payload + + +class TestFatalErrorPaths: + """Regression tests for fatal error paths that must destroy the session. + + Certain errors are not retryable and must immediately destroy + the session. + """ + + def test_failed_send_destroys_session(self): + """Send failure on a connected session destroys it.""" + from unittest.mock import AsyncMock + + from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.session import Session, SessionState + from river.transport import WebSocketClientTransport + + transport = WebSocketClientTransport( + ws_url="ws://127.0.0.1:1", + client_id="client", + server_id="server", + codec=NaiveJsonCodec(), + ) + codec = CodecMessageAdapter(NaiveJsonCodec()) + session = Session("s1", "client", "server", codec) + session.state = SessionState.CONNECTED + session._ws = AsyncMock() + transport.sessions["server"] = session + + send_fn = transport.get_session_bound_send_fn("server", "s1") + + # A payload that can't be serialized (set is not JSON-serializable) + from river.types import PartialTransportMessage + + try: + send_fn( + PartialTransportMessage( + payload={"bad": {1, 2}}, + stream_id="x", + control_flags=0, + ) + ) + except RuntimeError: + pass + + # Session must be destroyed + assert transport.sessions.get("server") is None + + def test_failed_send_seq_consumed(self): + """Send failure does not roll back seq. 
+ + The seq is consumed and the session is destroyed instead. + """ + from unittest.mock import AsyncMock + + from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.session import Session, SessionState + from river.types import PartialTransportMessage + + codec = CodecMessageAdapter(NaiveJsonCodec()) + session = Session("s1", "client", "server", codec) + session.state = SessionState.CONNECTED + session._ws = AsyncMock() + + initial_seq = session.seq + + ok, _ = session.send( + PartialTransportMessage( + payload={"bad": {1, 2}}, + stream_id="x", + control_flags=0, + ) + ) + + assert not ok + # seq was consumed (not rolled back) + assert session.seq == initial_seq + 1 + + def test_invalid_message_destroys_session(self): + """Receiving a corrupt message destroys the session.""" + from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.session import Session, SessionState + from river.transport import WebSocketClientTransport + + transport = WebSocketClientTransport( + ws_url="ws://127.0.0.1:1", + client_id="client", + server_id="server", + codec=NaiveJsonCodec(), + ) + codec = CodecMessageAdapter(NaiveJsonCodec()) + session = Session("s1", "client", "server", codec) + session.state = SessionState.CONNECTED + transport.sessions["server"] = session + + errors: list[dict] = [] + transport.add_event_listener("protocolError", lambda e: errors.append(e)) + + # Feed garbage bytes + transport._on_message_data(session, b"not valid json", "server") + + # Session must be destroyed + assert transport.sessions.get("server") is None + assert len(errors) == 1 + assert errors[0]["type"] == "invalid_message" + + def test_readable_broken_after_async_for_break(self): + """Breaking out of async for marks readable as broken.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + + # Simulate what async for + break does: create iterator, get + # one value, then let the iterator be GC'd + it = 
r.__aiter__() + # The __del__ should mark broken + del it + + assert r._broken + # Subsequent pushes should be no-ops + r._push_value({"ok": True, "payload": 2}) + assert not r._has_values_in_queue() + + def test_frozen_session_options(self): + """SessionOptions is frozen — mutation raises.""" + from river.session import SessionOptions + + opts = SessionOptions() + try: + opts.heartbeat_interval_ms = 999 # type: ignore[misc] + raise AssertionError("should have raised FrozenInstanceError") + except AttributeError: + pass # frozen dataclass raises AttributeError on mutation + + def test_json_codec_large_int_encoding(self): + """Large ints beyond JS safe integer range are encoded as $b.""" + from river.codec import NaiveJsonCodec + + codec = NaiveJsonCodec() + large = 2**53 + 1 + buf = codec.to_buffer({"n": large}) + decoded = codec.from_buffer(buf) + assert decoded["n"] == large + + # Normal ints should NOT be encoded as $b + buf2 = codec.to_buffer({"n": 42}) + raw = buf2.decode("utf-8") + assert "$b" not in raw + + def test_json_codec_negative_large_int(self): + """Negative large ints are also encoded as $b.""" + from river.codec import NaiveJsonCodec + + codec = NaiveJsonCodec() + large_neg = -(2**53 + 1) + buf = codec.to_buffer({"n": large_neg}) + decoded = codec.from_buffer(buf) + assert decoded["n"] == large_neg From 41938fc316607240bedb967b2d5942a4027ece1c Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 19:45:03 -0800 Subject: [PATCH 14/29] more fixes --- python-client/river/codec.py | 28 +++++++++++++++-- python-client/river/streams.py | 10 +++++++ python-client/tests/test_e2e.py | 53 +++++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 2 deletions(-) diff --git a/python-client/river/codec.py b/python-client/river/codec.py index 826cc7b3..b2fcebc9 100644 --- a/python-client/river/codec.py +++ b/python-client/river/codec.py @@ -67,6 +67,11 @@ def from_buffer(self, buf: bytes) -> dict[str, Any]: return 
json.loads(buf.decode("utf-8"), object_hook=_custom_object_hook) +_BIGINT_EXT_TYPE = 0 +_MSGPACK_INT_MAX = 2**64 - 1 +_MSGPACK_INT_MIN = -(2**63) + + class BinaryCodec(Codec): """Codec using msgpack serialization (matches TypeScript BinaryCodec).""" @@ -75,12 +80,31 @@ class BinaryCodec(Codec): def to_buffer(self, obj: dict[str, Any]) -> bytes: import msgpack # type: ignore[import-untyped] - return msgpack.packb(obj, use_bin_type=True) + return msgpack.packb(obj, use_bin_type=True, default=self._ext_encode) def from_buffer(self, buf: bytes) -> dict[str, Any]: import msgpack # type: ignore[import-untyped] - return msgpack.unpackb(buf, raw=False) + return msgpack.unpackb(buf, raw=False, ext_hook=self._ext_decode) + + @staticmethod + def _ext_encode(obj: Any) -> Any: + import msgpack # type: ignore[import-untyped] + + if isinstance(obj, int) and (obj > _MSGPACK_INT_MAX or obj < _MSGPACK_INT_MIN): + # Encode as string in extension type 0 (matches TS BigInt ext) + data = msgpack.packb(str(obj), use_bin_type=True) + return msgpack.ExtType(_BIGINT_EXT_TYPE, data) + raise TypeError(f"Unknown type: {type(obj)}") + + @staticmethod + def _ext_decode(code: int, data: bytes) -> Any: + import msgpack # type: ignore[import-untyped] + + if code == _BIGINT_EXT_TYPE: + val = msgpack.unpackb(data, raw=False) + return int(val) + return msgpack.ExtType(code, data) class CodecMessageAdapter: diff --git a/python-client/river/streams.py b/python-client/river/streams.py index ebe8c6b2..889d3a13 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -25,6 +25,7 @@ def __init__(self) -> None: self._closed = False self._broken = False self._locked = False + self._locked_by_consumer = False # locked by collect() or __aiter__ self._waiters: list[asyncio.Future[None]] = [] def _push_value(self, value: T) -> None: @@ -87,6 +88,7 @@ async def collect(self) -> list[T]: if self._locked: raise TypeError("Readable is already locked") self._locked = True + 
self._locked_by_consumer = True results: list[T] = [] async for item in self._iterate(): results.append(item) @@ -98,6 +100,8 @@ async def next(self) -> tuple[bool, T | None]: Returns (False, value) if a value is available. Returns (True, None) if the stream is done. """ + if self._locked_by_consumer: + raise TypeError("Readable is already locked") async for item in self._iterate(): return False, item return True, None @@ -133,6 +137,7 @@ def __aiter__(self): if self._locked: raise TypeError("Readable is already locked") self._locked = True + self._locked_by_consumer = True return _ReadableIterator(self) @@ -184,6 +189,11 @@ def __del__(self): # Synchronous cleanup when the iterator is GC'd (e.g. break in for-await) self._readable._broken = True self._readable._queue.clear() + # Wake any pending waiters so they don't block forever + for w in self._readable._waiters: + if not w.done(): + w.set_result(None) + self._readable._waiters.clear() class Writable(Generic[T]): diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index dd09677e..48b0b868 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -1660,3 +1660,56 @@ def test_json_codec_negative_large_int(self): buf = codec.to_buffer({"n": large_neg}) decoded = codec.from_buffer(buf) assert decoded["n"] == large_neg + + def test_binary_codec_large_int(self): + """Binary codec handles ints beyond msgpack native range.""" + from river.codec import BinaryCodec + + codec = BinaryCodec() + large = 10**30 + buf = codec.to_buffer({"n": large}) + decoded = codec.from_buffer(buf) + assert decoded["n"] == large + + @pytest.mark.asyncio + async def test_next_rejects_after_aiter_lock(self): + """next() raises TypeError if stream is locked by __aiter__.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + _it = r.__aiter__() # locks by consumer + + with pytest.raises(TypeError, match="already locked"): + await 
r.next() + + @pytest.mark.asyncio + async def test_next_rejects_after_collect_lock(self): + """next() raises TypeError if stream is locked by collect().""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + r._trigger_close() + + await r.collect() # locks by consumer + with pytest.raises(TypeError, match="already locked"): + await r.next() + + @pytest.mark.asyncio + async def test_iterator_del_marks_broken_and_wakes(self): + """Dropping an iterator marks the stream as broken and wakes waiters.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"ok": True, "payload": 1}) + + # Iterate and break out + async for _item in r: + break + + # Stream should be broken (iterator __del__ ran) + assert r._broken + # Push after break should be a no-op + r._push_value({"ok": True, "payload": 2}) + assert not r._has_values_in_queue() From 630f0e86de03cbb4f122d9eaa16bdf25aa79d132 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 20:39:54 -0800 Subject: [PATCH 15/29] more fixes --- .github/workflows/ci.yaml | 12 +- python-client/pyproject.toml | 4 + python-client/river/__init__.py | 19 +- python-client/river/client.py | 43 +++-- python-client/river/codec.py | 8 +- python-client/river/codegen/emitter.py | 36 ++-- python-client/river/codegen/schema.py | 19 +- .../codegen/templates/service_client.py.j2 | 80 ++------- python-client/river/session.py | 11 +- python-client/river/streams.py | 2 +- python-client/river/transport.py | 9 +- .../tests/generated/cancel_client.py | 165 ++++-------------- .../tests/generated/fallible_client.py | 39 ++--- .../tests/generated/ordering_client.py | 17 +- .../tests/generated/subscribable_client.py | 31 ++-- python-client/tests/generated/test_client.py | 62 ++----- .../tests/generated/uploadable_client.py | 76 ++------ python-client/tests/test_codegen.py | 78 +++++++++ python-client/tests/test_e2e.py | 38 ++++ 19 files changed, 369 insertions(+), 
380 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 41c24098..9641ced5 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -60,7 +60,7 @@ jobs: - name: Install Python dependencies working-directory: python-client - run: pip install -e ".[dev]" + run: pip install -e ".[dev]" ty - name: Python lint working-directory: python-client @@ -68,6 +68,16 @@ jobs: ruff check . ruff format --check . + - name: Python type check + working-directory: python-client + run: ty check river/ + - name: Python tests working-directory: python-client run: python -m pytest tests/ -v + + - name: Python type check generated clients + working-directory: python-client + run: | + ty check tests/generated/ + ty check tests/test_codegen.py diff --git a/python-client/pyproject.toml b/python-client/pyproject.toml index 8d6dc10e..09cac636 100644 --- a/python-client/pyproject.toml +++ b/python-client/pyproject.toml @@ -29,10 +29,14 @@ testpaths = ["tests"] [tool.ruff] target-version = "py310" +exclude = ["tests/generated"] [tool.ruff.lint] select = ["E", "F", "I", "W"] +[tool.ty.environment] +extra-paths = ["tests"] + [tool.setuptools.packages.find] include = ["river*"] diff --git a/python-client/river/__init__.py b/python-client/river/__init__.py index 1a0d3ddf..46faca65 100644 --- a/python-client/river/__init__.py +++ b/python-client/river/__init__.py @@ -1,6 +1,16 @@ -"""River protocol v2.0 Python client implementation.""" +"""River protocol v2.0 Python client implementation. -from river.client import RiverClient +This client was generated with the assistance of AI (Claude). 
+""" + +from river.client import ( + ErrResult, + OkResult, + RiverClient, + StreamResult, + SubscriptionResult, + UploadResult, +) from river.codec import BinaryCodec, NaiveJsonCodec from river.streams import Readable, Writable from river.transport import WebSocketClientTransport @@ -8,6 +18,11 @@ __all__ = [ "RiverClient", + "OkResult", + "ErrResult", + "StreamResult", + "UploadResult", + "SubscriptionResult", "WebSocketClientTransport", "NaiveJsonCodec", "BinaryCodec", diff --git a/python-client/river/client.py b/python-client/river/client.py index a2557651..ec99f441 100644 --- a/python-client/river/client.py +++ b/python-client/river/client.py @@ -9,7 +9,9 @@ import asyncio import logging from dataclasses import dataclass -from typing import Any, Callable +from typing import Any, Callable, Generic, Literal, TypeVar + +from typing_extensions import TypedDict from river.streams import Readable, Writable from river.transport import WebSocketClientTransport @@ -29,28 +31,43 @@ logger = logging.getLogger(__name__) +T = TypeVar("T") +TPayload = TypeVar("TPayload") -@dataclass -class RpcResult: - """Result of an RPC call.""" - ok: bool - payload: Any +class OkResult(TypedDict, Generic[TPayload]): + """Successful result from a procedure call.""" + + ok: Literal[True] + payload: TPayload + + +class ErrResult(TypedDict, Generic[TPayload]): + """Error result from a procedure call.""" + + ok: Literal[False] + payload: TPayload @dataclass -class StreamResult: - """Result of opening a stream procedure.""" +class StreamResult(Generic[T]): + """Result of opening a stream procedure. - req_writable: Writable + Generic over the input type ``T`` written to ``req_writable``. + """ + + req_writable: Writable[T] res_readable: Readable @dataclass -class UploadResult: - """Result of opening an upload procedure.""" +class UploadResult(Generic[T]): + """Result of opening an upload procedure. - req_writable: Writable + Generic over the input type ``T`` written to ``req_writable``. 
+ """ + + req_writable: Writable[T] finalize: Callable[[], Any] # async callable returning RpcResult @@ -113,7 +130,7 @@ async def rpc( procedure_name: str, init: Any, abort_signal: asyncio.Event | None = None, - ) -> dict[str, Any]: + ) -> Any: """Invoke an RPC procedure. Returns the result dict: {"ok": True/False, "payload": ...} diff --git a/python-client/river/codec.py b/python-client/river/codec.py index b2fcebc9..e7c182c1 100644 --- a/python-client/river/codec.py +++ b/python-client/river/codec.py @@ -78,18 +78,18 @@ class BinaryCodec(Codec): name = "binary" def to_buffer(self, obj: dict[str, Any]) -> bytes: - import msgpack # type: ignore[import-untyped] + import msgpack return msgpack.packb(obj, use_bin_type=True, default=self._ext_encode) def from_buffer(self, buf: bytes) -> dict[str, Any]: - import msgpack # type: ignore[import-untyped] + import msgpack return msgpack.unpackb(buf, raw=False, ext_hook=self._ext_decode) @staticmethod def _ext_encode(obj: Any) -> Any: - import msgpack # type: ignore[import-untyped] + import msgpack if isinstance(obj, int) and (obj > _MSGPACK_INT_MAX or obj < _MSGPACK_INT_MIN): # Encode as string in extension type 0 (matches TS BigInt ext) @@ -99,7 +99,7 @@ def _ext_encode(obj: Any) -> Any: @staticmethod def _ext_decode(code: int, data: bytes) -> Any: - import msgpack # type: ignore[import-untyped] + import msgpack if code == _BIGINT_EXT_TYPE: val = msgpack.unpackb(data, raw=False) diff --git a/python-client/river/codegen/emitter.py b/python-client/river/codegen/emitter.py index 1db2c6b1..06e5c7f2 100644 --- a/python-client/river/codegen/emitter.py +++ b/python-client/river/codegen/emitter.py @@ -29,6 +29,19 @@ _env.filters["pascal"] = _to_pascal_case +def _result_type(proc) -> str: # noqa: ANN001 + """Build the typed result annotation for a procedure.""" + ok = f"OkResult[{proc.output_type.annotation}]" + if proc.error_type: + err = f"ErrResult[{proc.error_type.annotation} | ProtocolError]" + else: + err = 
"ErrResult[ProtocolError]" + return f"{ok} | {err}" + + +_env.filters["result_type"] = _result_type + + # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- @@ -51,6 +64,9 @@ def _collect_used_type_names(svc: ServiceDef, ir: SchemaIR) -> list[str]: _extract_names(proc.init_type.annotation, td_names, names) if proc.input_type: _extract_names(proc.input_type.annotation, td_names, names) + _extract_names(proc.output_type.annotation, td_names, names) + if proc.error_type: + _extract_names(proc.error_type.annotation, td_names, names) return sorted(names) @@ -110,22 +126,20 @@ def render_service_client(svc: ServiceDef, ir: SchemaIR, import_prefix: str) -> type_names = _collect_used_type_names(svc, ir) types_module = "._types" if import_prefix == "." else f"{import_prefix}_types" - needs_readable = any( - p.proc_type in ("stream", "subscription") for p in svc.procedures - ) - needs_writable = any(p.proc_type in ("stream", "upload") for p in svc.procedures) - - wrappers = [ - p for p in svc.procedures if p.proc_type in ("stream", "upload", "subscription") - ] + proc_types = {p.proc_type for p in svc.procedures} + has_rpc = "rpc" in proc_types + has_stream = "stream" in proc_types + has_upload = "upload" in proc_types + has_subscription = "subscription" in proc_types return _env.get_template("service_client.py.j2").render( service=svc, type_names=type_names, types_module=types_module, - needs_readable=needs_readable, - needs_writable=needs_writable, - wrappers=wrappers, + has_rpc=has_rpc, + has_stream=has_stream, + has_upload=has_upload, + has_subscription=has_subscription, ) diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index d61568a2..c9da7189 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -118,10 +118,23 @@ def _to_snake_case(s: str) -> str: def 
_safe_field_name(name: str) -> str: - """Ensure a field name is a valid Python identifier.""" - name = _sanitize_identifier(name) + """Ensure a field name is a valid Python identifier. + + Raises ValueError if the name requires sanitization that would + change it from its wire representation, since TypedDict keys must + match the dict keys sent on the wire. + """ + sanitized = _sanitize_identifier(name) + if sanitized != name: + raise ValueError( + f"schema property {name!r} is not a valid Python identifier " + f"and cannot be represented in a TypedDict" + ) if keyword.iskeyword(name): - return name + "_" + raise ValueError( + f"schema property {name!r} is a Python keyword " + f"and cannot be used as a TypedDict field" + ) return name diff --git a/python-client/river/codegen/templates/service_client.py.j2 b/python-client/river/codegen/templates/service_client.py.j2 index 0558c433..560993fa 100644 --- a/python-client/river/codegen/templates/service_client.py.j2 +++ b/python-client/river/codegen/templates/service_client.py.j2 @@ -5,14 +5,14 @@ from __future__ import annotations import asyncio from typing import Any -from river.client import RiverClient -{% if needs_readable and needs_writable %} -from river.streams import Readable, Writable -{% elif needs_readable %} -from river.streams import Readable -{% elif needs_writable %} -from river.streams import Writable -{% endif %} +from river.client import ( + ErrResult, + OkResult, + RiverClient, + StreamResult, + SubscriptionResult, + UploadResult, +) {% if type_names %} from {{ types_module }} import ( @@ -21,53 +21,8 @@ from {{ types_module }} import ( {% endfor %} ) {% endif %} -{% for proc in wrappers %} - - -{% if proc.proc_type == "stream" %} -class {{ service.class_name }}{{ proc.name | pascal }}StreamResult: - """Streaming result for ``{{ service.name }}.{{ proc.name }}``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[{{ 
proc.input_type.annotation }}]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable -{% elif proc.proc_type == "upload" %} -class {{ service.class_name }}{{ proc.name | pascal }}UploadResult: - """Upload result for ``{{ service.name }}.{{ proc.name }}``.""" - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[{{ proc.input_type.annotation }}]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - async def finalize(self) -> dict[str, Any]: - """Finalize the upload and get the response.""" - return await self._inner.finalize() -{% elif proc.proc_type == "subscription" %} -class {{ service.class_name }}{{ proc.name | pascal }}SubscriptionResult: - """Subscription result for ``{{ service.name }}.{{ proc.name }}``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable -{% endif %} -{% endfor %} +from ._errors import ProtocolError class {{ service.class_name }}Client: @@ -83,7 +38,7 @@ class {{ service.class_name }}Client: init: {{ proc.init_type.annotation }}, *, abort_signal: asyncio.Event | None = None, - ) -> dict[str, Any]: + ) -> {{ proc | result_type }}: {% if proc.description %} """{{ proc.description }}""" {% endif %} @@ -99,50 +54,47 @@ class {{ service.class_name }}Client: init: {{ proc.init_type.annotation }}, *, abort_signal: asyncio.Event | None = None, - ) -> {{ service.class_name }}{{ proc.name | pascal }}StreamResult: + ) -> StreamResult[{{ proc.input_type.annotation }}]: {% if proc.description %} """{{ proc.description }}""" {% endif %} - result = self._client.stream( + return self._client.stream( "{{ service.name }}", "{{ 
proc.name }}", init, abort_signal=abort_signal, ) - return {{ service.class_name }}{{ proc.name | pascal }}StreamResult(result) {% elif proc.proc_type == "upload" %} def {{ proc.py_name }}( self, init: {{ proc.init_type.annotation }}, *, abort_signal: asyncio.Event | None = None, - ) -> {{ service.class_name }}{{ proc.name | pascal }}UploadResult: + ) -> UploadResult[{{ proc.input_type.annotation }}]: {% if proc.description %} """{{ proc.description }}""" {% endif %} - result = self._client.upload( + return self._client.upload( "{{ service.name }}", "{{ proc.name }}", init, abort_signal=abort_signal, ) - return {{ service.class_name }}{{ proc.name | pascal }}UploadResult(result) {% elif proc.proc_type == "subscription" %} def {{ proc.py_name }}( self, init: {{ proc.init_type.annotation }}, *, abort_signal: asyncio.Event | None = None, - ) -> {{ service.class_name }}{{ proc.name | pascal }}SubscriptionResult: + ) -> SubscriptionResult: {% if proc.description %} """{{ proc.description }}""" {% endif %} - result = self._client.subscribe( + return self._client.subscribe( "{{ service.name }}", "{{ proc.name }}", init, abort_signal=abort_signal, ) - return {{ service.class_name }}{{ proc.name | pascal }}SubscriptionResult(result) {% endif %} {% endfor %} diff --git a/python-client/river/session.py b/python-client/river/session.py index 507b6dde..0e16abc7 100644 --- a/python-client/river/session.py +++ b/python-client/river/session.py @@ -148,15 +148,16 @@ def send(self, partial: PartialTransportMessage) -> tuple[bool, str]: def _send_over_wire(self, msg: TransportMessage) -> tuple[bool, str]: """Serialize and send a message over the current connection.""" - ok, result = self.codec.to_buffer(msg) + ok, buf_or_err = self.codec.to_buffer(msg) if not ok: - return False, result # type: ignore[return-value] + assert isinstance(buf_or_err, str) + return False, buf_or_err + assert isinstance(buf_or_err, bytes) try: assert self._ws is not None # websockets library uses async 
send, but we schedule it - asyncio.get_event_loop().call_soon( - lambda data=result: self._do_ws_send(data) - ) + buf = buf_or_err + asyncio.get_event_loop().call_soon(lambda data=buf: self._do_ws_send(data)) return True, msg.id except Exception as e: return False, f"Failed to send: {e}" diff --git a/python-client/river/streams.py b/python-client/river/streams.py index 889d3a13..a3238497 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -225,7 +225,7 @@ def close(self, value: T | None = None) -> None: self._write_cb(value) self._closed = True # Nullify callbacks after invocation to prevent reuse - self._write_cb = lambda _: None # type: ignore[assignment] + self._write_cb = lambda _: None if self._close_cb: self._close_cb() self._close_cb = None diff --git a/python-client/river/transport.py b/python-client/river/transport.py index 1c567659..e8130f68 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -143,7 +143,7 @@ def __init__( self.sessions: dict[str, Session] = {} # to_id -> Session self._events = EventDispatcher() self._retry_budget = LeakyBucketRateLimit() - self._reconnect_on_connection_drop = True + self._reconnect_on_connection_drop = self.options.enable_transparent_reconnects # Connection tasks self._connect_tasks: dict[str, asyncio.Task] = {} @@ -224,6 +224,7 @@ def connect(self, to: str | None = None) -> None: "protocolError", {"type": "conn_retry_exceeded", "message": "Retries exceeded"}, ) + self._delete_session(to) return backoff_ms = self._retry_budget.get_backoff_ms() @@ -262,7 +263,7 @@ async def _do_connect(): async def _create_connection(self, to: str) -> Any: """Create a new WebSocket connection.""" - import websockets # type: ignore[import-untyped] + import websockets url = self._ws_url if isinstance(self._ws_url, str) else self._ws_url(to) @@ -481,7 +482,11 @@ def _on_connection_failed(self, to: str) -> None: if session is None or session._destroyed: return + # Transition 
to NoConnection with grace period so the session + # is eventually destroyed if reconnect doesn't succeed. + loop = self._get_loop() session.state = SessionState.NO_CONNECTION + session.start_grace_period(loop) if self._reconnect_on_connection_drop: self._try_reconnecting(to) diff --git a/python-client/tests/generated/cancel_client.py b/python-client/tests/generated/cancel_client.py index b580c8df..4388a876 100644 --- a/python-client/tests/generated/cancel_client.py +++ b/python-client/tests/generated/cancel_client.py @@ -5,132 +5,42 @@ import asyncio from typing import Any -from river.client import RiverClient -from river.streams import Readable, Writable +from river.client import ( + ErrResult, + OkResult, + RiverClient, + StreamResult, + SubscriptionResult, + UploadResult, +) from ._types import ( CancelBlockingRpcInit, + CancelBlockingRpcOutput, CancelBlockingStreamInit, CancelBlockingStreamInput, + CancelBlockingStreamOutput, CancelBlockingSubscriptionInit, + CancelBlockingSubscriptionOutput, CancelBlockingUploadInit, CancelBlockingUploadInput, + CancelBlockingUploadOutput, CancelCountedStreamInit, CancelCountedStreamInput, + CancelCountedStreamOutput, CancelImmediateRpcInit, + CancelImmediateRpcOutput, CancelImmediateStreamInit, CancelImmediateStreamInput, + CancelImmediateStreamOutput, CancelImmediateSubscriptionInit, + CancelImmediateSubscriptionOutput, CancelImmediateUploadInit, CancelImmediateUploadInput, + CancelImmediateUploadOutput, ) - -class CancelBlockingStreamStreamResult: - """Streaming result for ``cancel.blockingStream``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[CancelBlockingStreamInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable - - -class CancelBlockingUploadUploadResult: - 
"""Upload result for ``cancel.blockingUpload``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[CancelBlockingUploadInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - async def finalize(self) -> dict[str, Any]: - """Finalize the upload and get the response.""" - return await self._inner.finalize() - - -class CancelBlockingSubscriptionSubscriptionResult: - """Subscription result for ``cancel.blockingSubscription``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable - - -class CancelImmediateStreamStreamResult: - """Streaming result for ``cancel.immediateStream``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[CancelImmediateStreamInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable - - -class CancelImmediateUploadUploadResult: - """Upload result for ``cancel.immediateUpload``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[CancelImmediateUploadInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - async def finalize(self) -> dict[str, Any]: - """Finalize the upload and get the response.""" - return await self._inner.finalize() - - -class CancelImmediateSubscriptionSubscriptionResult: - """Subscription result for ``cancel.immediateSubscription``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" 
- return self._inner.res_readable - - -class CancelCountedStreamStreamResult: - """Streaming result for ``cancel.countedStream``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[CancelCountedStreamInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable +from ._errors import ProtocolError class CancelClient: @@ -144,7 +54,7 @@ async def blocking_rpc( init: CancelBlockingRpcInit, *, abort_signal: asyncio.Event | None = None, - ) -> dict[str, Any]: + ) -> OkResult[CancelBlockingRpcOutput] | ErrResult[ProtocolError]: return await self._client.rpc( "cancel", "blockingRpc", @@ -157,49 +67,46 @@ def blocking_stream( init: CancelBlockingStreamInit, *, abort_signal: asyncio.Event | None = None, - ) -> CancelBlockingStreamStreamResult: - result = self._client.stream( + ) -> StreamResult[CancelBlockingStreamInput]: + return self._client.stream( "cancel", "blockingStream", init, abort_signal=abort_signal, ) - return CancelBlockingStreamStreamResult(result) def blocking_upload( self, init: CancelBlockingUploadInit, *, abort_signal: asyncio.Event | None = None, - ) -> CancelBlockingUploadUploadResult: - result = self._client.upload( + ) -> UploadResult[CancelBlockingUploadInput]: + return self._client.upload( "cancel", "blockingUpload", init, abort_signal=abort_signal, ) - return CancelBlockingUploadUploadResult(result) def blocking_subscription( self, init: CancelBlockingSubscriptionInit, *, abort_signal: asyncio.Event | None = None, - ) -> CancelBlockingSubscriptionSubscriptionResult: - result = self._client.subscribe( + ) -> SubscriptionResult: + return self._client.subscribe( "cancel", "blockingSubscription", init, abort_signal=abort_signal, ) - return CancelBlockingSubscriptionSubscriptionResult(result) async def 
immediate_rpc( self, init: CancelImmediateRpcInit, *, abort_signal: asyncio.Event | None = None, - ) -> dict[str, Any]: + ) -> OkResult[CancelImmediateRpcOutput] | ErrResult[ProtocolError]: return await self._client.rpc( "cancel", "immediateRpc", @@ -212,53 +119,49 @@ def immediate_stream( init: CancelImmediateStreamInit, *, abort_signal: asyncio.Event | None = None, - ) -> CancelImmediateStreamStreamResult: - result = self._client.stream( + ) -> StreamResult[CancelImmediateStreamInput]: + return self._client.stream( "cancel", "immediateStream", init, abort_signal=abort_signal, ) - return CancelImmediateStreamStreamResult(result) def immediate_upload( self, init: CancelImmediateUploadInit, *, abort_signal: asyncio.Event | None = None, - ) -> CancelImmediateUploadUploadResult: - result = self._client.upload( + ) -> UploadResult[CancelImmediateUploadInput]: + return self._client.upload( "cancel", "immediateUpload", init, abort_signal=abort_signal, ) - return CancelImmediateUploadUploadResult(result) def immediate_subscription( self, init: CancelImmediateSubscriptionInit, *, abort_signal: asyncio.Event | None = None, - ) -> CancelImmediateSubscriptionSubscriptionResult: - result = self._client.subscribe( + ) -> SubscriptionResult: + return self._client.subscribe( "cancel", "immediateSubscription", init, abort_signal=abort_signal, ) - return CancelImmediateSubscriptionSubscriptionResult(result) def counted_stream( self, init: CancelCountedStreamInit, *, abort_signal: asyncio.Event | None = None, - ) -> CancelCountedStreamStreamResult: - result = self._client.stream( + ) -> StreamResult[CancelCountedStreamInput]: + return self._client.stream( "cancel", "countedStream", init, abort_signal=abort_signal, ) - return CancelCountedStreamStreamResult(result) diff --git a/python-client/tests/generated/fallible_client.py b/python-client/tests/generated/fallible_client.py index 9c1eb493..8c799556 100644 --- a/python-client/tests/generated/fallible_client.py +++ 
b/python-client/tests/generated/fallible_client.py @@ -5,31 +5,27 @@ import asyncio from typing import Any -from river.client import RiverClient -from river.streams import Readable, Writable +from river.client import ( + ErrResult, + OkResult, + RiverClient, + StreamResult, + SubscriptionResult, + UploadResult, +) from ._types import ( + FallibleDivideErrorDivByZero, + FallibleDivideErrorInfinity, FallibleDivideInit, + FallibleDivideOutput, + FallibleEchoError, FallibleEchoInit, FallibleEchoInput, + FallibleEchoOutput, ) - -class FallibleEchoStreamResult: - """Streaming result for ``fallible.echo``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[FallibleEchoInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable +from ._errors import ProtocolError class FallibleClient: @@ -43,7 +39,7 @@ async def divide( init: FallibleDivideInit, *, abort_signal: asyncio.Event | None = None, - ) -> dict[str, Any]: + ) -> OkResult[FallibleDivideOutput] | ErrResult[FallibleDivideErrorDivByZero | FallibleDivideErrorInfinity | ProtocolError]: return await self._client.rpc( "fallible", "divide", @@ -56,11 +52,10 @@ def echo( init: FallibleEchoInit, *, abort_signal: asyncio.Event | None = None, - ) -> FallibleEchoStreamResult: - result = self._client.stream( + ) -> StreamResult[FallibleEchoInput]: + return self._client.stream( "fallible", "echo", init, abort_signal=abort_signal, ) - return FallibleEchoStreamResult(result) diff --git a/python-client/tests/generated/ordering_client.py b/python-client/tests/generated/ordering_client.py index 79adbe5e..eeb0b307 100644 --- a/python-client/tests/generated/ordering_client.py +++ b/python-client/tests/generated/ordering_client.py @@ -5,13 +5,24 @@ import asyncio from typing import Any -from 
river.client import RiverClient +from river.client import ( + ErrResult, + OkResult, + RiverClient, + StreamResult, + SubscriptionResult, + UploadResult, +) from ._types import ( OrderingAddInit, + OrderingAddOutput, OrderingGetAllInit, + OrderingGetAllOutput, ) +from ._errors import ProtocolError + class OrderingClient: """Typed client for the ``ordering`` service.""" @@ -24,7 +35,7 @@ async def add( init: OrderingAddInit, *, abort_signal: asyncio.Event | None = None, - ) -> dict[str, Any]: + ) -> OkResult[OrderingAddOutput] | ErrResult[ProtocolError]: return await self._client.rpc( "ordering", "add", @@ -37,7 +48,7 @@ async def get_all( init: OrderingGetAllInit, *, abort_signal: asyncio.Event | None = None, - ) -> dict[str, Any]: + ) -> OkResult[OrderingGetAllOutput] | ErrResult[ProtocolError]: return await self._client.rpc( "ordering", "getAll", diff --git a/python-client/tests/generated/subscribable_client.py b/python-client/tests/generated/subscribable_client.py index ccb1abe8..9c1649d3 100644 --- a/python-client/tests/generated/subscribable_client.py +++ b/python-client/tests/generated/subscribable_client.py @@ -5,25 +5,23 @@ import asyncio from typing import Any -from river.client import RiverClient -from river.streams import Readable +from river.client import ( + ErrResult, + OkResult, + RiverClient, + StreamResult, + SubscriptionResult, + UploadResult, +) from ._types import ( SubscribableAddInit, + SubscribableAddOutput, SubscribableValueInit, + SubscribableValueOutput, ) - -class SubscribableValueSubscriptionResult: - """Subscription result for ``subscribable.value``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable +from ._errors import ProtocolError class SubscribableClient: @@ -37,7 +35,7 @@ async def add( init: SubscribableAddInit, *, abort_signal: asyncio.Event | None = None, - ) 
-> dict[str, Any]: + ) -> OkResult[SubscribableAddOutput] | ErrResult[ProtocolError]: return await self._client.rpc( "subscribable", "add", @@ -50,11 +48,10 @@ def value( init: SubscribableValueInit, *, abort_signal: asyncio.Event | None = None, - ) -> SubscribableValueSubscriptionResult: - result = self._client.subscribe( + ) -> SubscriptionResult: + return self._client.subscribe( "subscribable", "value", init, abort_signal=abort_signal, ) - return SubscribableValueSubscriptionResult(result) diff --git a/python-client/tests/generated/test_client.py b/python-client/tests/generated/test_client.py index ed29ece8..6b9d5a7f 100644 --- a/python-client/tests/generated/test_client.py +++ b/python-client/tests/generated/test_client.py @@ -5,51 +5,29 @@ import asyncio from typing import Any -from river.client import RiverClient -from river.streams import Readable, Writable +from river.client import ( + ErrResult, + OkResult, + RiverClient, + StreamResult, + SubscriptionResult, + UploadResult, +) from ._types import ( TestAddInit, + TestAddOutput, TestEchoBinaryInit, + TestEchoBinaryOutput, TestEchoInit, TestEchoInput, + TestEchoOutput, TestEchoWithPrefixInit, TestEchoWithPrefixInput, + TestEchoWithPrefixOutput, ) - -class TestEchoStreamResult: - """Streaming result for ``test.echo``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[TestEchoInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - @property - def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable - - -class TestEchoWithPrefixStreamResult: - """Streaming result for ``test.echoWithPrefix``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[TestEchoWithPrefixInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - @property - 
def res_readable(self) -> Readable[dict[str, Any]]: - """Readable stream for receiving responses.""" - return self._inner.res_readable +from ._errors import ProtocolError class TestClient: @@ -63,7 +41,7 @@ async def add( init: TestAddInit, *, abort_signal: asyncio.Event | None = None, - ) -> dict[str, Any]: + ) -> OkResult[TestAddOutput] | ErrResult[ProtocolError]: return await self._client.rpc( "test", "add", @@ -76,35 +54,33 @@ def echo( init: TestEchoInit, *, abort_signal: asyncio.Event | None = None, - ) -> TestEchoStreamResult: - result = self._client.stream( + ) -> StreamResult[TestEchoInput]: + return self._client.stream( "test", "echo", init, abort_signal=abort_signal, ) - return TestEchoStreamResult(result) def echo_with_prefix( self, init: TestEchoWithPrefixInit, *, abort_signal: asyncio.Event | None = None, - ) -> TestEchoWithPrefixStreamResult: - result = self._client.stream( + ) -> StreamResult[TestEchoWithPrefixInput]: + return self._client.stream( "test", "echoWithPrefix", init, abort_signal=abort_signal, ) - return TestEchoWithPrefixStreamResult(result) async def echo_binary( self, init: TestEchoBinaryInit, *, abort_signal: asyncio.Event | None = None, - ) -> dict[str, Any]: + ) -> OkResult[TestEchoBinaryOutput] | ErrResult[ProtocolError]: return await self._client.rpc( "test", "echoBinary", diff --git a/python-client/tests/generated/uploadable_client.py b/python-client/tests/generated/uploadable_client.py index 1c6b4737..a14c45e6 100644 --- a/python-client/tests/generated/uploadable_client.py +++ b/python-client/tests/generated/uploadable_client.py @@ -5,65 +5,28 @@ import asyncio from typing import Any -from river.client import RiverClient -from river.streams import Writable +from river.client import ( + ErrResult, + OkResult, + RiverClient, + StreamResult, + SubscriptionResult, + UploadResult, +) from ._types import ( UploadableAddMultipleInit, UploadableAddMultipleInput, + UploadableAddMultipleOutput, UploadableAddMultipleWithPrefixInit, 
UploadableAddMultipleWithPrefixInput, + UploadableAddMultipleWithPrefixOutput, UploadableCancellableAddInit, UploadableCancellableAddInput, + UploadableCancellableAddOutput, ) - -class UploadableAddMultipleUploadResult: - """Upload result for ``uploadable.addMultiple``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[UploadableAddMultipleInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - async def finalize(self) -> dict[str, Any]: - """Finalize the upload and get the response.""" - return await self._inner.finalize() - - -class UploadableAddMultipleWithPrefixUploadResult: - """Upload result for ``uploadable.addMultipleWithPrefix``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[UploadableAddMultipleWithPrefixInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - async def finalize(self) -> dict[str, Any]: - """Finalize the upload and get the response.""" - return await self._inner.finalize() - - -class UploadableCancellableAddUploadResult: - """Upload result for ``uploadable.cancellableAdd``.""" - - def __init__(self, inner: Any) -> None: - self._inner = inner - - @property - def req_writable(self) -> Writable[UploadableCancellableAddInput]: - """Writable stream for sending requests.""" - return self._inner.req_writable - - async def finalize(self) -> dict[str, Any]: - """Finalize the upload and get the response.""" - return await self._inner.finalize() +from ._errors import ProtocolError class UploadableClient: @@ -77,39 +40,36 @@ def add_multiple( init: UploadableAddMultipleInit, *, abort_signal: asyncio.Event | None = None, - ) -> UploadableAddMultipleUploadResult: - result = self._client.upload( + ) -> UploadResult[UploadableAddMultipleInput]: + return self._client.upload( "uploadable", "addMultiple", init, abort_signal=abort_signal, ) - 
return UploadableAddMultipleUploadResult(result) def add_multiple_with_prefix( self, init: UploadableAddMultipleWithPrefixInit, *, abort_signal: asyncio.Event | None = None, - ) -> UploadableAddMultipleWithPrefixUploadResult: - result = self._client.upload( + ) -> UploadResult[UploadableAddMultipleWithPrefixInput]: + return self._client.upload( "uploadable", "addMultipleWithPrefix", init, abort_signal=abort_signal, ) - return UploadableAddMultipleWithPrefixUploadResult(result) def cancellable_add( self, init: UploadableCancellableAddInit, *, abort_signal: asyncio.Event | None = None, - ) -> UploadableCancellableAddUploadResult: - result = self._client.upload( + ) -> UploadResult[UploadableCancellableAddInput]: + return self._client.upload( "uploadable", "cancellableAdd", init, abort_signal=abort_signal, ) - return UploadableCancellableAddUploadResult(result) diff --git a/python-client/tests/test_codegen.py b/python-client/tests/test_codegen.py index f4020f75..6929e969 100644 --- a/python-client/tests/test_codegen.py +++ b/python-client/tests/test_codegen.py @@ -351,6 +351,7 @@ async def test_subscription_via_generated_client(self, server_url: str) -> None: # Get the initial value done, msg = await sub.res_readable.next() assert not done + assert msg is not None assert msg["ok"] is True assert "count" in msg["payload"] @@ -381,3 +382,80 @@ async def test_fallible_rpc_error(self, server_url: str) -> None: assert result["payload"]["code"] == "DIV_BY_ZERO" finally: await transport.close() + + +class TestCodegenFieldNames: + """Codegen field name validation tests.""" + + def test_keyword_field_raises(self): + """Python keywords are rejected at codegen time.""" + from river.codegen.schema import _safe_field_name + + with pytest.raises(ValueError, match="Python keyword"): + _safe_field_name("from") + with pytest.raises(ValueError, match="Python keyword"): + _safe_field_name("class") + with pytest.raises(ValueError, match="Python keyword"): + _safe_field_name("import") + + 
def test_normal_field_unchanged(self): + from river.codegen.schema import _safe_field_name + + assert _safe_field_name("name") == "name" + assert _safe_field_name("streamId") == "streamId" + + def test_dash_field_raises(self): + """Fields with dashes are rejected at codegen time.""" + from river.codegen.schema import _safe_field_name + + with pytest.raises(ValueError, match="not a valid Python identifier"): + _safe_field_name("request-id") + + def test_schema_with_invalid_field_raises(self): + """Codegen rejects schemas with non-identifier property names.""" + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + schema = { + "type": "object", + "properties": { + "request-id": {"type": "string"}, + "normal": {"type": "string"}, + }, + "required": ["request-id", "normal"], + } + with pytest.raises(ValueError, match="not a valid Python identifier"): + converter._schema_to_typeref(schema, "TestObj") + + def test_schema_with_keyword_field_raises(self): + """Codegen rejects schemas with keyword property names.""" + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + schema = { + "type": "object", + "properties": { + "from": {"type": "string"}, + }, + "required": ["from"], + } + with pytest.raises(ValueError, match="Python keyword"): + converter._schema_to_typeref(schema, "TestObj") + + def test_valid_schema_passes(self): + """Schemas with normal camelCase properties work fine.""" + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + schema = { + "type": "object", + "properties": { + "userId": {"type": "string"}, + "count": {"type": "number"}, + }, + "required": ["userId", "count"], + } + ref = converter._schema_to_typeref(schema, "TestObj") + assert ref.annotation == "TestObj" + td = converter._typedicts[-1] + assert [f.name for f in td.fields] == ["userId", "count"] diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index 48b0b868..0ea4f2d8 
100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -1713,3 +1713,41 @@ async def test_iterator_del_marks_broken_and_wakes(self): # Push after break should be a no-op r._push_value({"ok": True, "payload": 2}) assert not r._has_values_in_queue() + + @pytest.mark.asyncio + async def test_connection_failed_starts_grace_period(self, server_url: str): + """Connection failure starts grace period so session eventually dies.""" + from river.session import SessionOptions + from tests.test_utils import wait_for_session_gone + + transport = WebSocketClientTransport( + ws_url="ws://127.0.0.1:1", # unreachable + client_id=None, + server_id="UNREACHABLE", + codec=NaiveJsonCodec(), + options=SessionOptions( + connection_timeout_ms=100, + session_disconnect_grace_ms=300, + ), + ) + transport.reconnect_on_connection_drop = False + try: + transport.connect("UNREACHABLE") + # Connection will fail; grace period starts + await wait_for_session_gone(transport, "UNREACHABLE") + finally: + await transport.close() + + def test_enable_transparent_reconnects_option(self): + """enable_transparent_reconnects=False disables reconnect.""" + from river.session import SessionOptions + + opts = SessionOptions(enable_transparent_reconnects=False) + transport = WebSocketClientTransport( + ws_url="ws://127.0.0.1:1", + client_id=None, + server_id="SERVER", + codec=NaiveJsonCodec(), + options=opts, + ) + assert transport.reconnect_on_connection_drop is False From 27c04c78833c75bf7abf9a4d13b82913dd9726e1 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 22:18:13 -0800 Subject: [PATCH 16/29] fixes and regresison --- python-client/river/client.py | 25 +++- python-client/river/codegen/emitter.py | 12 ++ python-client/river/codegen/schema.py | 7 +- .../codegen/templates/service_client.py.j2 | 4 + python-client/river/streams.py | 2 +- python-client/river/transport.py | 10 +- python-client/tests/conftest.py | 53 ++++++--- python-client/tests/test_e2e.py | 49 
++++++++ python-client/tests/test_session.py | 109 ++++++++++++++++++ 9 files changed, 252 insertions(+), 19 deletions(-) diff --git a/python-client/river/client.py b/python-client/river/client.py index ec99f441..c236f5fc 100644 --- a/python-client/river/client.py +++ b/python-client/river/client.py @@ -13,6 +13,7 @@ from typing_extensions import TypedDict +from river.session import SessionState from river.streams import Readable, Writable from river.transport import WebSocketClientTransport from river.types import ( @@ -259,8 +260,30 @@ def _handle_proc( if self._connect_on_invoke: transport.connect(to) - # Get the session and a send function + # Get the session and a send function. + # If connect() couldn't start (retry budget exhausted, transport + # closing, etc.) the session will be in NO_CONNECTION with no + # connect task in flight — fail immediately instead of hanging. session = transport._get_or_create_session(to) + connect_task = transport._connect_tasks.get(to) + has_active_connect = connect_task is not None and not connect_task.done() + if session.state == SessionState.NO_CONNECTION and not has_active_connect: + transport._delete_session(to, emit_closing=False) + res_readable = Readable() + res_readable._push_value( + err_result( + UNEXPECTED_DISCONNECT_CODE, + f"{to} connection failed", + ) + ) + res_readable._trigger_close() + req_writable = Writable(write_cb=lambda _: None, close_cb=None) + req_writable._closed = True + return { + "res_readable": res_readable, + "req_writable": req_writable, + } + session_id = session.id try: send_fn = transport.get_session_bound_send_fn(to, session_id) diff --git a/python-client/river/codegen/emitter.py b/python-client/river/codegen/emitter.py index 06e5c7f2..6bb7e49a 100644 --- a/python-client/river/codegen/emitter.py +++ b/python-client/river/codegen/emitter.py @@ -132,6 +132,17 @@ def render_service_client(svc: ServiceDef, ir: SchemaIR, import_prefix: str) -> has_upload = "upload" in proc_types has_subscription = 
"subscription" in proc_types + # Check if any annotation references Literal (e.g. const schemas) + all_annotations = [] + for p in svc.procedures: + all_annotations.append(p.init_type.annotation) + all_annotations.append(p.output_type.annotation) + if p.input_type: + all_annotations.append(p.input_type.annotation) + if p.error_type: + all_annotations.append(p.error_type.annotation) + needs_literal = any("Literal[" in a for a in all_annotations) + return _env.get_template("service_client.py.j2").render( service=svc, type_names=type_names, @@ -140,6 +151,7 @@ def render_service_client(svc: ServiceDef, ir: SchemaIR, import_prefix: str) -> has_stream=has_stream, has_upload=has_upload, has_subscription=has_subscription, + needs_literal=needs_literal, ) diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index c9da7189..c957d7a1 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -151,6 +151,7 @@ def __init__(self) -> None: def convert(self, raw: dict) -> SchemaIR: """Convert the top-level serialized schema dict to IR.""" + self._typedicts = [] services: list[ServiceDef] = [] for svc_name, svc_data in raw.get("services", {}).items(): svc_def = self._convert_service(svc_name, svc_data) @@ -247,7 +248,11 @@ def _schema_to_typeref(self, schema: dict, name_hint: str) -> TypeRef: if "const" in schema: val = schema["const"] if isinstance(val, str): - return TypeRef(annotation=f'Literal["{val}"]') + # Use repr to handle all escaping (quotes, backslashes, + # control chars) then unwrap the outer quotes and re-wrap + # with double quotes for Literal["..."] syntax. 
+ escaped = repr(val)[1:-1].replace('"', '\\"') + return TypeRef(annotation=f'Literal["{escaped}"]') return TypeRef(annotation=f"Literal[{val!r}]") # anyOf (union) diff --git a/python-client/river/codegen/templates/service_client.py.j2 b/python-client/river/codegen/templates/service_client.py.j2 index 560993fa..2cd7ff0a 100644 --- a/python-client/river/codegen/templates/service_client.py.j2 +++ b/python-client/river/codegen/templates/service_client.py.j2 @@ -3,7 +3,11 @@ from __future__ import annotations import asyncio +{% if needs_literal %} +from typing import Any, Literal +{% else %} from typing import Any +{% endif %} from river.client import ( ErrResult, diff --git a/python-client/river/streams.py b/python-client/river/streams.py index a3238497..40c67208 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -55,7 +55,7 @@ def is_readable(self) -> bool: return not self._locked and not self._broken def is_closed(self) -> bool: - """Whether the stream has been closed.""" + """Whether the stream is fully consumed (closed and queue drained).""" return self._closed and len(self._queue) == 0 def _has_values_in_queue(self) -> bool: diff --git a/python-client/river/transport.py b/python-client/river/transport.py index e8130f68..3cbf7671 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -235,6 +235,7 @@ def connect(self, to: str | None = None) -> None: session.state = SessionState.BACKING_OFF async def _do_connect(): + ws = None try: if backoff_ms > 0: await asyncio.sleep(backoff_ms / 1000.0) @@ -252,7 +253,9 @@ async def _do_connect(): session.state = SessionState.HANDSHAKING await self._do_handshake(session, ws, to) except asyncio.CancelledError: - pass + # Clean up socket if we got cancelled mid-handshake + if ws is not None and session._ws is not ws: + await ws.close() except Exception as e: logger.debug("Connection attempt failed for %s: %s", to, e) if not session._destroyed: @@ -484,9 +487,12 @@ 
def _on_connection_failed(self, to: str) -> None: # Transition to NoConnection with grace period so the session # is eventually destroyed if reconnect doesn't succeed. + # Only start the grace period if one isn't already running, + # so repeated failures don't keep extending the deadline. loop = self._get_loop() session.state = SessionState.NO_CONNECTION - session.start_grace_period(loop) + if session._grace_period_task is None or session._grace_period_task.done(): + session.start_grace_period(loop) if self._reconnect_on_connection_drop: self._try_reconnecting(to) diff --git a/python-client/tests/conftest.py b/python-client/tests/conftest.py index 31b908f4..3157c0c5 100644 --- a/python-client/tests/conftest.py +++ b/python-client/tests/conftest.py @@ -8,6 +8,7 @@ import os import re +import selectors import signal import subprocess import sys @@ -119,20 +120,44 @@ def _start_server( port = None deadline = time.monotonic() + 30 assert proc.stdout is not None - while time.monotonic() < deadline: - line = proc.stdout.readline().decode("utf-8").strip() - if not line: - if proc.poll() is not None: - stderr = proc.stderr.read().decode("utf-8") if proc.stderr else "" - raise RuntimeError( - f"{label} exited with code {proc.returncode}.\nstderr: {stderr}" - ) - time.sleep(0.1) - continue - m = re.match(r"RIVER_PORT=(\d+)", line) - if m: - port = int(m.group(1)) - break + sel = selectors.DefaultSelector() + sel.register(proc.stdout, selectors.EVENT_READ) + buf = b"" + try: + while time.monotonic() < deadline: + remaining = deadline - time.monotonic() + if remaining <= 0: + break + ready = sel.select(timeout=min(remaining, 1.0)) + if not ready: + if proc.poll() is not None: + stderr = proc.stderr.read().decode("utf-8") if proc.stderr else "" + raise RuntimeError( + f"{label} exited with code {proc.returncode}.\nstderr: {stderr}" + ) + continue + chunk = proc.stdout.read1(4096) # type: ignore[union-attr] + if not chunk: + # EOF — child closed stdout (likely exited) + if 
proc.poll() is not None: + stderr = proc.stderr.read().decode("utf-8") if proc.stderr else "" + raise RuntimeError( + f"{label} exited with code {proc.returncode}.\nstderr: {stderr}" + ) + continue + buf += chunk + while b"\n" in buf: + line_bytes, buf = buf.split(b"\n", 1) + line = line_bytes.decode("utf-8").strip() + m = re.match(r"RIVER_PORT=(\d+)", line) + if m: + port = int(m.group(1)) + break + if port is not None: + break + finally: + sel.unregister(proc.stdout) + sel.close() if port is None: proc.kill() diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index 0ea4f2d8..cd2e6545 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -1751,3 +1751,52 @@ def test_enable_transparent_reconnects_option(self): options=opts, ) assert transport.reconnect_on_connection_drop is False + + def test_literal_const_escaping(self): + """String consts with quotes/backslashes/control chars are escaped.""" + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + schema = {"const": 'a"b'} + ref = converter._schema_to_typeref(schema, "Test") + assert ref.annotation == 'Literal["a\\"b"]' + + schema2 = {"const": "a\\b"} + ref2 = converter._schema_to_typeref(schema2, "Test") + assert ref2.annotation == 'Literal["a\\\\b"]' + + # Control characters must be escaped + schema3 = {"const": "line1\nline2"} + ref3 = converter._schema_to_typeref(schema3, "Test") + assert ref3.annotation == 'Literal["line1\\nline2"]' + + schema4 = {"const": "a\tb"} + ref4 = converter._schema_to_typeref(schema4, "Test") + assert ref4.annotation == 'Literal["a\\tb"]' + + def test_is_closed_with_buffered_data(self): + """is_closed() is False when closed but queue has data.""" + from river.streams import Readable + + r: Readable = Readable() + r._push_value({"val": 1}) + r._trigger_close() + # Closed but not fully consumed + assert r.is_closed() is False + assert r._closed is True + + @pytest.mark.asyncio + async def 
test_close_cancels_inflight_connect(self, server_url: str): + """close() during handshake doesn't leak the websocket.""" + transport = WebSocketClientTransport( + ws_url=server_url, + client_id=None, + server_id="SERVER", + codec=NaiveJsonCodec(), + ) + transport.connect("SERVER") + # Let connection start but don't wait for completion + await asyncio.sleep(0) + await transport.close() + # No leaked sessions + assert len(transport.sessions) == 0 diff --git a/python-client/tests/test_session.py b/python-client/tests/test_session.py index 4e8a4326..42102a90 100644 --- a/python-client/tests/test_session.py +++ b/python-client/tests/test_session.py @@ -390,3 +390,112 @@ async def test_connect_on_invoke_false_no_reconnect(self, server_url: str): finally: # transport already closed above pass + + +# ===================================================================== +# Regression: stale connect-task must not block fail-fast +# ===================================================================== + + +class TestStaleConnectTask: + @pytest.mark.asyncio + async def test_done_connect_task_does_not_block_failfast(self): + """A completed (done) connect task in _connect_tasks must not + prevent the fail-fast path from firing. + + Regression: previously the check was `to not in _connect_tasks`, + so a done task kept the entry alive and calls would hang instead + of failing immediately when retries were exhausted. 
+ """ + transport = WebSocketClientTransport( + ws_url="ws://127.0.0.1:1", # unreachable + client_id=None, + server_id="STALE", + codec=NaiveJsonCodec(), + options=SessionOptions( + connection_timeout_ms=100, + handshake_timeout_ms=100, + session_disconnect_grace_ms=200, + ), + ) + transport.reconnect_on_connection_drop = False + try: + # Trigger a connect that will fail + transport.connect("STALE") + await wait_for_session_gone(transport, "STALE") + + # The done task is still in _connect_tasks + assert "STALE" in transport._connect_tasks + assert transport._connect_tasks["STALE"].done() + + # Exhaust the retry budget so connect() is a no-op + transport._retry_budget.budget_consumed = ( + transport._retry_budget.attempt_budget_capacity + ) + + # RPC must fail immediately, not hang + client = RiverClient( + transport, server_id="STALE", connect_on_invoke=True + ) + result = await asyncio.wait_for( + client.rpc("test", "add", {"n": 1}), timeout=1.0 + ) + assert result["ok"] is False + assert result["payload"]["code"] == "UNEXPECTED_DISCONNECT" + finally: + await transport.close() + + +# ===================================================================== +# Regression: grace period must not reset on each failed reconnect +# ===================================================================== + + +class TestGracePeriodNotResetOnRetry: + @pytest.mark.asyncio + async def test_grace_period_not_extended_by_retries(self, server_url: str): + """Repeated connection failures must not restart the grace timer. + + Regression: _on_connection_failed() unconditionally called + start_grace_period(), which cancelled and restarted the timer + on every retry, extending session lifetime far beyond + session_disconnect_grace_ms. 
+ """ + grace_ms = 400 + transport = WebSocketClientTransport( + ws_url="ws://127.0.0.1:1", # unreachable + client_id=None, + server_id="GRACE", + codec=NaiveJsonCodec(), + options=SessionOptions( + connection_timeout_ms=100, + handshake_timeout_ms=100, + session_disconnect_grace_ms=grace_ms, + ), + ) + try: + transport.connect("GRACE") + + # Wait for at least one connection failure to set the grace period + await wait_for( + lambda: ( + (s := transport.sessions.get("GRACE")) is not None + and s._grace_period_task is not None + ), + timeout=2.0, + ) + + session = transport.sessions["GRACE"] + original_expiry = session._grace_expiry_time + assert original_expiry is not None + + # After further retries, the expiry time must not have moved forward + await asyncio.sleep(0.2) + session2 = transport.sessions.get("GRACE") + if session2 is not None and session2._grace_expiry_time is not None: + assert session2._grace_expiry_time <= original_expiry + + # Session should be gone within grace_ms + generous margin + await wait_for_session_gone(transport, "GRACE", timeout=3.0) + finally: + await transport.close() From 744772f162bdf20d33c0acf0bff7ca1140af4ed9 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 22:30:20 -0800 Subject: [PATCH 17/29] lint whoops --- python-client/tests/test_session.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/python-client/tests/test_session.py b/python-client/tests/test_session.py index 42102a90..f29ad624 100644 --- a/python-client/tests/test_session.py +++ b/python-client/tests/test_session.py @@ -434,9 +434,7 @@ async def test_done_connect_task_does_not_block_failfast(self): ) # RPC must fail immediately, not hang - client = RiverClient( - transport, server_id="STALE", connect_on_invoke=True - ) + client = RiverClient(transport, server_id="STALE", connect_on_invoke=True) result = await asyncio.wait_for( client.rpc("test", "add", {"n": 1}), timeout=1.0 ) From 0c133fc2e7d1e7b4771b8d708a44c8abb014e9ec Mon Sep 
17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 22:52:17 -0800 Subject: [PATCH 18/29] fix --- python-client/river/client.py | 4 +- python-client/river/codegen/schema.py | 11 ++++- python-client/river/session.py | 29 +++++++----- python-client/river/streams.py | 4 +- python-client/river/transport.py | 4 +- python-client/tests/test_codegen.py | 47 ++++++++++++++++++++ python-client/tests/test_session.py | 63 +++++++++++++++++++++++++++ 7 files changed, 143 insertions(+), 19 deletions(-) diff --git a/python-client/river/client.py b/python-client/river/client.py index c236f5fc..e6c87b0d 100644 --- a/python-client/river/client.py +++ b/python-client/river/client.py @@ -268,7 +268,7 @@ def _handle_proc( connect_task = transport._connect_tasks.get(to) has_active_connect = connect_task is not None and not connect_task.done() if session.state == SessionState.NO_CONNECTION and not has_active_connect: - transport._delete_session(to, emit_closing=False) + transport._delete_session(to) res_readable = Readable() res_readable._push_value( err_result( @@ -473,7 +473,7 @@ async def _watch_abort(): on_client_cancel() try: - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() abort_task = loop.create_task(_watch_abort()) except RuntimeError: pass diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index c957d7a1..f04b4417 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -89,8 +89,8 @@ def _sanitize_identifier(s: str) -> str: """Replace characters illegal in Python identifiers with underscores.""" # Replace dashes, spaces, and other non-alnum/non-underscore chars s = re.sub(r"[^a-zA-Z0-9_]", "_", s) - # Strip leading underscores/digits so the result is a valid identifier - s = re.sub(r"^[_0-9]+", "", s) + # Strip leading digits so the result is a valid identifier + s = re.sub(r"^[0-9]+", "", s) return s or "unnamed" @@ -135,6 +135,13 @@ def _safe_field_name(name: str) -> 
str: f"schema property {name!r} is a Python keyword " f"and cannot be used as a TypedDict field" ) + # Names starting with __ (and not ending with __) are name-mangled + # inside class bodies, so the TypedDict key won't match the wire key. + if name.startswith("__") and not name.endswith("__"): + raise ValueError( + f"schema property {name!r} would be name-mangled in a " + f"TypedDict class body and cannot be used as a field" + ) return name diff --git a/python-client/river/session.py b/python-client/river/session.py index 0e16abc7..4a1ba043 100644 --- a/python-client/river/session.py +++ b/python-client/river/session.py @@ -155,20 +155,27 @@ def _send_over_wire(self, msg: TransportMessage) -> tuple[bool, str]: assert isinstance(buf_or_err, bytes) try: assert self._ws is not None - # websockets library uses async send, but we schedule it - buf = buf_or_err - asyncio.get_event_loop().call_soon(lambda data=buf: self._do_ws_send(data)) + loop = asyncio.get_running_loop() + task = loop.create_task(self._ws.send(buf_or_err)) + task.add_done_callback(self._on_ws_send_done) return True, msg.id except Exception as e: return False, f"Failed to send: {e}" - def _do_ws_send(self, data: bytes) -> None: - """Actually send data over the WebSocket.""" - if self._ws is not None and not self._destroyed: - try: - asyncio.ensure_future(self._ws.send(data)) - except Exception as e: - logger.error("WebSocket send error: %s", e) + def _on_ws_send_done(self, task: asyncio.Task) -> None: + """Handle completion of an async ws.send(). + + If the send failed, trigger the connection-closed callback so the + transport can reconnect and replay the send buffer — matching how + the TS side relies on synchronous send exceptions. 
+ """ + if task.cancelled(): + return + exc = task.exception() + if exc is not None: + logger.error("WebSocket send error: %s", exc) + if not self._destroyed and self._on_connection_closed: + self._on_connection_closed() def send_buffered_messages(self) -> tuple[bool, str | None]: """Retransmit all buffered messages over the current connection. @@ -245,7 +252,7 @@ def _reset_heartbeat_miss_timeout(self) -> None: self._heartbeat_miss_task.cancel() self._heartbeat_miss_task = None try: - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() if loop.is_running(): self.start_heartbeat_miss_timeout(loop) except RuntimeError: diff --git a/python-client/river/streams.py b/python-client/river/streams.py index 40c67208..6c045435 100644 --- a/python-client/river/streams.py +++ b/python-client/river/streams.py @@ -128,7 +128,7 @@ async def _iterate(self): return # Wait for more data - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() fut: asyncio.Future[None] = loop.create_future() self._waiters.append(fut) await fut @@ -180,7 +180,7 @@ async def __anext__(self): if r._closed: raise StopAsyncIteration - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() fut: asyncio.Future[None] = loop.create_future() r._waiters.append(fut) await fut diff --git a/python-client/river/transport.py b/python-client/river/transport.py index 3cbf7671..f392433c 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -99,7 +99,7 @@ async def _restore_loop(): pass try: - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() self._restore_task = loop.create_task(_restore_loop()) except RuntimeError: pass @@ -155,7 +155,7 @@ def get_status(self) -> str: def _get_loop(self) -> asyncio.AbstractEventLoop: if self._loop is None: - self._loop = asyncio.get_event_loop() + self._loop = asyncio.get_running_loop() return self._loop # --- Event API --- diff --git a/python-client/tests/test_codegen.py 
b/python-client/tests/test_codegen.py index 6929e969..b081ed37 100644 --- a/python-client/tests/test_codegen.py +++ b/python-client/tests/test_codegen.py @@ -404,6 +404,53 @@ def test_normal_field_unchanged(self): assert _safe_field_name("name") == "name" assert _safe_field_name("streamId") == "streamId" + def test_underscore_prefixed_field_accepted(self): + """Underscore-prefixed fields like _id are valid Python identifiers. + + Regression: _sanitize_identifier stripped leading underscores, + causing _safe_field_name to reject valid fields like '_id'. + """ + from river.codegen.schema import _safe_field_name + + assert _safe_field_name("_id") == "_id" + assert _safe_field_name("_private") == "_private" + + def test_dunder_field_rejected(self): + """Double-underscore-prefixed fields are name-mangled in class bodies. + + Regression: after allowing leading underscores, __dunder fields + were accepted but would be name-mangled in the generated TypedDict + class body, making the key not match the wire representation. 
+ """ + from river.codegen.schema import _safe_field_name + + with pytest.raises(ValueError, match="name-mangled"): + _safe_field_name("__dunder") + with pytest.raises(ValueError, match="name-mangled"): + _safe_field_name("__private") + # Dunder methods (ending with __) are NOT mangled + assert _safe_field_name("__init__") == "__init__" + + def test_schema_with_underscore_prefixed_field(self): + """Schemas with underscore-prefixed properties generate correctly.""" + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + schema = { + "type": "object", + "properties": { + "_id": {"type": "string"}, + "name": {"type": "string"}, + }, + "required": ["_id", "name"], + } + ref = converter._schema_to_typeref(schema, "Doc") + assert ref.annotation == "Doc" + td = converter._typedicts[-1] + field_names = [f.name for f in td.fields] + assert "_id" in field_names + assert "name" in field_names + def test_dash_field_raises(self): """Fields with dashes are rejected at codegen time.""" from river.codegen.schema import _safe_field_name diff --git a/python-client/tests/test_session.py b/python-client/tests/test_session.py index f29ad624..bfa3a725 100644 --- a/python-client/tests/test_session.py +++ b/python-client/tests/test_session.py @@ -497,3 +497,66 @@ async def test_grace_period_not_extended_by_retries(self, server_url: str): await wait_for_session_gone(transport, "GRACE", timeout=3.0) finally: await transport.close() + + +# ===================================================================== +# Regression: fail-fast must not orphan existing in-flight procedures +# ===================================================================== + + +class TestFailFastEmitsClosing: + @pytest.mark.asyncio + async def test_failfast_notifies_existing_streams(self, server_url: str): + """When fail-fast deletes a session, existing in-flight procedures + must receive the sessionStatus 'closing' event so they get + UNEXPECTED_DISCONNECT instead of hanging. 
+ + Regression: _delete_session was called with emit_closing=False, + silently removing the session while older streams still waited. + """ + transport = WebSocketClientTransport( + ws_url=server_url, + client_id=None, + server_id="SERVER", + codec=NaiveJsonCodec(), + options=SHORT_OPTIONS, + ) + client = RiverClient( + transport, + server_id="SERVER", + connect_on_invoke=True, + eagerly_connect=False, + ) + try: + # Open a stream so there's an in-flight procedure + stream = client.stream("test", "echo", {}) + stream.req_writable.write({"msg": "hello", "ignore": False}) + done, msg = await stream.res_readable.next() + assert not done + assert msg["ok"] is True + + # Drop connection, disable reconnect so session stays NO_CONNECTION + transport.reconnect_on_connection_drop = False + session = transport.sessions.get("SERVER") + assert session is not None + await session._ws.close() + await wait_for_disconnected(transport) + + # Exhaust retry budget so connect() in the next RPC is a no-op + transport._retry_budget.budget_consumed = ( + transport._retry_budget.attempt_budget_capacity + ) + + # This RPC hits the fail-fast path and deletes the session + result = await asyncio.wait_for( + client.rpc("test", "add", {"n": 1}), timeout=2.0 + ) + assert result["ok"] is False + + # The existing stream must have received UNEXPECTED_DISCONNECT + done, msg = await asyncio.wait_for(stream.res_readable.next(), timeout=2.0) + assert not done + assert msg["ok"] is False + assert msg["payload"]["code"] == "UNEXPECTED_DISCONNECT" + finally: + await transport.close() From 4e2bbc400b52b040eccc015bb56651f2cb5d0c58 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 22:56:11 -0800 Subject: [PATCH 19/29] fix fix --- package.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/package.json b/package.json index f9510f34..ff12988c 100644 --- a/package.json +++ b/package.json @@ -73,7 +73,8 @@ "format:fix": "npx prettier . 
--write", "lint": "eslint .", "lint:fix": "eslint . --fix", - "fix": "npm run format:fix && npm run lint:fix", + "fix": "npm run format:fix && npm run lint:fix && npm run fix:python", + "fix:python": "cd python-client && ruff check --fix . && ruff format .", "build": "rm -rf dist && tsup && du -sh dist", "prepack": "npm run build", "release": "npm publish --access public", From 97c34ecbd697252fc335b464d933964add98761d Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 23:22:26 -0800 Subject: [PATCH 20/29] otel propagation --- python-client/river/session.py | 5 +- python-client/river/transport.py | 20 ++++++- python-client/tests/test_e2e.py | 96 ++++++++++++++++++++++++++++++++ 3 files changed, 119 insertions(+), 2 deletions(-) diff --git a/python-client/river/session.py b/python-client/river/session.py index 4a1ba043..dcdb5748 100644 --- a/python-client/river/session.py +++ b/python-client/river/session.py @@ -331,7 +331,9 @@ def destroy(self) -> None: self._ws = None self.send_buffer.clear() - def create_handshake_request(self, metadata: Any = None) -> TransportMessage: + def create_handshake_request( + self, metadata: Any = None, tracing: dict[str, str] | None = None + ) -> TransportMessage: """Create a handshake request transport message. Handshake messages have seq=0, ack=0, controlFlags=0. 
@@ -351,4 +353,5 @@ def create_handshake_request(self, metadata: Any = None) -> TransportMessage: payload=payload, stream_id=generate_id(), control_flags=0, + tracing=tracing, ) diff --git a/python-client/river/transport.py b/python-client/river/transport.py index f392433c..d5c4130f 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -278,10 +278,28 @@ async def _create_connection(self, to: str) -> Any: ) return ws + def _get_otel_propagation_context(self) -> dict[str, str] | None: + """Extract OTel propagation context if opentelemetry is installed.""" + try: + from opentelemetry import propagate + except ImportError: + return None + ctx: dict[str, str] = {} + propagate.inject(ctx) + result = {} + if ctx.get("traceparent"): + result["traceparent"] = ctx["traceparent"] + if ctx.get("tracestate"): + result["tracestate"] = ctx["tracestate"] + return result or None + async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: """Perform the handshake on a newly connected WebSocket.""" # Send handshake request - hs_msg = session.create_handshake_request(metadata=self._handshake_metadata) + tracing = self._get_otel_propagation_context() + hs_msg = session.create_handshake_request( + metadata=self._handshake_metadata, tracing=tracing + ) ok, buf = self._codec_adapter.to_buffer(hs_msg) if not ok: # Handshake send failure is fatal — destroy session diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index cd2e6545..3ba2df6e 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -1800,3 +1800,99 @@ async def test_close_cancels_inflight_connect(self, server_url: str): await transport.close() # No leaked sessions assert len(transport.sessions) == 0 + + +# ===================================================================== +# OTel Tracing Propagation Tests +# ===================================================================== + + +class TestOtelTracingPropagation: + 
def test_handshake_includes_tracing_when_otel_available(self): + """Handshake message includes tracing when OTel propagation is configured.""" + + from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.session import Session + + session = Session( + session_id="test-session", + from_id="client", + to_id="server", + codec=CodecMessageAdapter(NaiveJsonCodec()), + ) + + tracing = { + "traceparent": "00-abc123-def456-01", + "tracestate": "vendor=value", + } + msg = session.create_handshake_request(tracing=tracing) + assert msg.tracing == tracing + wire = msg.to_dict() + assert wire["tracing"] == tracing + + def test_handshake_omits_tracing_when_none(self): + """Handshake message omits tracing when not provided.""" + from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.session import Session + + session = Session( + session_id="test-session", + from_id="client", + to_id="server", + codec=CodecMessageAdapter(NaiveJsonCodec()), + ) + + msg = session.create_handshake_request() + assert msg.tracing is None + wire = msg.to_dict() + assert "tracing" not in wire + + def test_get_otel_propagation_context_with_mock(self): + """_get_otel_propagation_context extracts traceparent/tracestate.""" + import types + from unittest.mock import patch + + transport = WebSocketClientTransport( + ws_url="ws://localhost:0", + client_id="test", + server_id="SERVER", + codec=NaiveJsonCodec(), + ) + + def fake_inject(carrier): + carrier["traceparent"] = "00-tid-sid-01" + carrier["tracestate"] = "k=v" + + # Create a fake opentelemetry module with a propagate submodule + fake_otel = types.ModuleType("opentelemetry") + fake_propagate = types.ModuleType("opentelemetry.propagate") + fake_propagate.inject = fake_inject # type: ignore[attr-defined] + fake_otel.propagate = fake_propagate # type: ignore[attr-defined] + + with patch.dict( + "sys.modules", + {"opentelemetry": fake_otel, "opentelemetry.propagate": fake_propagate}, + ): + result = 
transport._get_otel_propagation_context() + + assert result == { + "traceparent": "00-tid-sid-01", + "tracestate": "k=v", + } + + def test_get_otel_propagation_context_without_otel(self): + """_get_otel_propagation_context returns None when OTel is not installed.""" + from unittest.mock import patch + + transport = WebSocketClientTransport( + ws_url="ws://localhost:0", + client_id="test", + server_id="SERVER", + codec=NaiveJsonCodec(), + ) + + # Ensure opentelemetry is not importable + with patch.dict("sys.modules", {"opentelemetry": None}): + result = transport._get_otel_propagation_context() + + assert result is None From 208c047d2193a4ad6ae903727e4bed16b1710705 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Mon, 2 Mar 2026 23:48:05 -0800 Subject: [PATCH 21/29] fixy fixy --- python-client/pyproject.toml | 1 + python-client/river/__init__.py | 3 +- python-client/river/client.py | 13 +- python-client/river/codec.py | 69 +- python-client/river/codegen/schema.py | 87 ++- python-client/river/transport.py | 24 +- python-client/tests/conftest.py | 30 +- python-client/tests/extract_test_schema.ts | 15 + python-client/tests/generated/_types.py | 10 + python-client/tests/generated/test_client.py | 15 + python-client/tests/test_codegen.py | 712 ++++++++++++++++++- python-client/tests/test_e2e.py | 247 ++++--- python-client/tests/test_equivalence.py | 6 +- python-client/tests/test_handshake.py | 8 +- python-client/tests/test_schema.json | 137 ++++ python-client/tests/test_server.ts | 15 + python-client/tests/test_server_handshake.ts | 2 + python-client/tests/test_session.py | 16 +- 18 files changed, 1196 insertions(+), 214 deletions(-) diff --git a/python-client/pyproject.toml b/python-client/pyproject.toml index 09cac636..52ef09eb 100644 --- a/python-client/pyproject.toml +++ b/python-client/pyproject.toml @@ -13,6 +13,7 @@ dependencies = [ "msgpack>=1.0", "typing_extensions>=4.0", "jinja2>=3.0", + "opentelemetry-api>=1.0", ] [project.optional-dependencies] diff --git 
a/python-client/river/__init__.py b/python-client/river/__init__.py index 46faca65..f739d745 100644 --- a/python-client/river/__init__.py +++ b/python-client/river/__init__.py @@ -11,7 +11,7 @@ SubscriptionResult, UploadResult, ) -from river.codec import BinaryCodec, NaiveJsonCodec +from river.codec import BinaryCodec from river.streams import Readable, Writable from river.transport import WebSocketClientTransport from river.types import Err, Ok, TransportMessage @@ -24,7 +24,6 @@ "UploadResult", "SubscriptionResult", "WebSocketClientTransport", - "NaiveJsonCodec", "BinaryCodec", "TransportMessage", "Ok", diff --git a/python-client/river/client.py b/python-client/river/client.py index e6c87b0d..b7c6f812 100644 --- a/python-client/river/client.py +++ b/python-client/river/client.py @@ -384,19 +384,26 @@ def on_message(msg: TransportMessage) -> None: if msg.to != transport.client_id: return - # Cancel from server + # Cancel from server — always an error if is_stream_cancel(msg.control_flags): clean_close = False payload = msg.payload - if isinstance(payload, dict) and "ok" in payload: + if isinstance(payload, dict) and "ok" in payload and not payload["ok"]: + # Already error-shaped, forward as-is res_readable._push_value(payload) else: + # Force to error shape (reject ok:true on cancel) code = ( payload.get("code", "UNKNOWN") if isinstance(payload, dict) else "UNKNOWN" ) - res_readable._push_value(err_result(code, str(payload))) + message = ( + payload.get("message", str(payload)) + if isinstance(payload, dict) + else str(payload) + ) + res_readable._push_value(err_result(code, message)) close_readable() if req_writable.is_writable(): req_writable.close() diff --git a/python-client/river/codec.py b/python-client/river/codec.py index e7c182c1..53cc966d 100644 --- a/python-client/river/codec.py +++ b/python-client/river/codec.py @@ -2,8 +2,6 @@ from __future__ import annotations -import base64 -import json from abc import ABC, abstractmethod from typing import Any @@ 
-24,49 +22,6 @@ def from_buffer(self, buf: bytes) -> dict[str, Any]: ... -_MAX_SAFE_INTEGER = 2**53 - 1 -_MIN_SAFE_INTEGER = -(2**53 - 1) - - -def _prepare_for_json(obj: Any) -> Any: - """Recursively replace bytes and large ints with wire markers.""" - if isinstance(obj, (bytes, bytearray)): - return {"$t": base64.b64encode(obj).decode("ascii")} - if isinstance(obj, bool): - return obj - if isinstance(obj, int): - if obj > _MAX_SAFE_INTEGER or obj < _MIN_SAFE_INTEGER: - return {"$b": str(obj)} - return obj - if isinstance(obj, dict): - return {k: _prepare_for_json(v) for k, v in obj.items()} - if isinstance(obj, (list, tuple)): - return [_prepare_for_json(v) for v in obj] - return obj - - -def _custom_object_hook(obj: dict) -> Any: - """JSON decoder hook for custom types.""" - if "$t" in obj and len(obj) == 1: - return base64.b64decode(obj["$t"]) - if "$b" in obj and len(obj) == 1: - return int(obj["$b"]) - return obj - - -class NaiveJsonCodec(Codec): - """Codec using JSON serialization (matches TypeScript NaiveJsonCodec).""" - - name = "naive" - - def to_buffer(self, obj: dict[str, Any]) -> bytes: - prepared = _prepare_for_json(obj) - return json.dumps(prepared, separators=(",", ":")).encode("utf-8") - - def from_buffer(self, buf: bytes) -> dict[str, Any]: - return json.loads(buf.decode("utf-8"), object_hook=_custom_object_hook) - - _BIGINT_EXT_TYPE = 0 _MSGPACK_INT_MAX = 2**64 - 1 _MSGPACK_INT_MIN = -(2**63) @@ -136,9 +91,27 @@ def from_buffer(self, buf: bytes) -> tuple[bool, TransportMessage | str]: return False, f"Expected dict, got {type(raw).__name__}" # Validate required fields required = ("id", "from", "to", "seq", "ack", "payload", "streamId") - for field in required: - if field not in raw: - return False, f"Missing required field: {field}" + for f in required: + if f not in raw: + return False, f"Missing required field: {f}" + # Validate field types to prevent downstream crashes + if not isinstance(raw["seq"], int): + return False, ( + f"Field 'seq' must 
be int, got {type(raw['seq']).__name__}" + ) + if not isinstance(raw["ack"], int): + return False, ( + f"Field 'ack' must be int, got {type(raw['ack']).__name__}" + ) + if not isinstance(raw["id"], str): + return False, ( + f"Field 'id' must be str, got {type(raw['id']).__name__}" + ) + if not isinstance(raw["streamId"], str): + return False, ( + f"Field 'streamId' must be str, " + f"got {type(raw['streamId']).__name__}" + ) msg = TransportMessage.from_dict(raw) return True, msg except Exception as e: diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index f04b4417..04c6e6f7 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -155,10 +155,13 @@ class SchemaConverter: def __init__(self) -> None: self._typedicts: list[TypedDictDef] = [] + # $id → assigned Python name (for recursive $ref resolution) + self._id_to_name: dict[str, str] = {} def convert(self, raw: dict) -> SchemaIR: """Convert the top-level serialized schema dict to IR.""" self._typedicts = [] + self._id_to_name = {} services: list[ServiceDef] = [] for svc_name, svc_data in raw.get("services", {}).items(): svc_def = self._convert_service(svc_name, svc_data) @@ -251,6 +254,18 @@ def _schema_to_typeref(self, schema: dict, name_hint: str) -> TypeRef: if not isinstance(schema, dict): return TypeRef(annotation="Any") + # $ref → forward reference to a previously-registered $id + if "$ref" in schema: + ref_id = schema["$ref"] + if ref_id in self._id_to_name: + return TypeRef(annotation=self._id_to_name[ref_id]) + return TypeRef(annotation="Never") + + # $id → register the name before converting (enables recursive refs) + schema_id = schema.get("$id") + if schema_id is not None: + self._id_to_name[schema_id] = name_hint + # const if "const" in schema: val = schema["const"] @@ -266,6 +281,10 @@ def _schema_to_typeref(self, schema: dict, name_hint: str) -> TypeRef: if "anyOf" in schema: return self._convert_union(schema, 
name_hint) + # allOf (intersection) — merge object properties + if "allOf" in schema: + return self._convert_intersection(schema, name_hint) + schema_type = schema.get("type") # Primitive types @@ -288,8 +307,10 @@ def _schema_to_typeref(self, schema: dict, name_hint: str) -> TypeRef: item_ref = self._schema_to_typeref(items, f"{name_hint}Item") return TypeRef(annotation=f"list[{item_ref.annotation}]") - # Object → TypedDict + # Object → TypedDict (may also contain allOf to merge) if schema_type == "object": + if "allOf" in schema: + return self._convert_intersection(schema, name_hint) return self._convert_object(schema, name_hint) # Fallback @@ -324,6 +345,70 @@ def _convert_object(self, schema: dict, name: str) -> TypeRef: self._typedicts.append(td) return TypeRef(annotation=name) + def _convert_intersection(self, schema: dict, name_hint: str) -> TypeRef: + """Convert a JSON Schema allOf to a merged TypedDict. + + Object variants have their properties merged into a single + TypedDict. A field is required if it appears in the ``required`` + list of *any* variant (intersection semantics). Non-object + variants and empty allOf produce ``Never`` since they represent + unrepresentable or contradictory intersections. 
+ """ + variants = schema.get("allOf", []) + if not variants: + return TypeRef(annotation="Never") + + # Partition into object-like variants and other variants + object_variants: list[dict] = [] + other_variants: list[dict] = [] + for v in variants: + if not isinstance(v, dict): + continue + v_type = v.get("type") + if v_type == "object" or "properties" in v: + object_variants.append(v) + else: + other_variants.append(v) + + # Merge all object properties + merged_props: dict[str, dict] = {} + merged_required: set[str] = set() + for v in object_variants: + for prop_name, prop_schema in v.get("properties", {}).items(): + merged_props[prop_name] = prop_schema + merged_required.update(v.get("required", [])) + + # If we have object properties, emit a TypedDict + if merged_props or object_variants: + description = schema.get("description") + fields: list[TypedDictField] = [] + for prop_name, prop_schema in merged_props.items(): + field_name = _safe_field_name(prop_name) + nested_name = name_hint + _to_pascal_case(prop_name) + field_ref = self._schema_to_typeref(prop_schema, nested_name) + field_desc = ( + prop_schema.get("description") + if isinstance(prop_schema, dict) + else None + ) + fields.append( + TypedDictField( + name=field_name, + type_ref=field_ref, + required=prop_name in merged_required, + description=field_desc, + ) + ) + td = TypedDictDef(name=name_hint, fields=fields, description=description) + self._typedicts.append(td) + return TypeRef(annotation=name_hint) + + # No object variants — primitive intersection is unrepresentable + if other_variants: + return TypeRef(annotation="Never") + + return TypeRef(annotation="Any") + def _convert_union(self, schema: dict, name_hint: str) -> TypeRef: """Convert a JSON Schema anyOf to a Union type.""" variants = schema.get("anyOf", []) diff --git a/python-client/river/transport.py b/python-client/river/transport.py index d5c4130f..4c6d0932 100644 --- a/python-client/river/transport.py +++ 
b/python-client/river/transport.py @@ -11,7 +11,9 @@ import random from typing import Any, Callable -from river.codec import Codec, CodecMessageAdapter, NaiveJsonCodec +from opentelemetry import propagate + +from river.codec import BinaryCodec, Codec, CodecMessageAdapter from river.session import DEFAULT_SESSION_OPTIONS, Session, SessionOptions, SessionState from river.types import ( RETRIABLE_HANDSHAKE_CODES, @@ -133,7 +135,7 @@ def __init__( self.client_id = client_id or generate_id() self.server_id = server_id or "SERVER" self._ws_url = ws_url - self._codec = codec or NaiveJsonCodec() + self._codec = codec or BinaryCodec() self._codec_adapter = CodecMessageAdapter(self._codec) self.options = options or DEFAULT_SESSION_OPTIONS self._handshake_metadata = handshake_metadata @@ -155,7 +157,17 @@ def get_status(self) -> str: def _get_loop(self) -> asyncio.AbstractEventLoop: if self._loop is None: - self._loop = asyncio.get_running_loop() + try: + self._loop = asyncio.get_running_loop() + except RuntimeError: + try: + loop = asyncio.get_event_loop() + if loop.is_closed(): + raise RuntimeError("closed") + self._loop = loop + except RuntimeError: + self._loop = asyncio.new_event_loop() + asyncio.set_event_loop(self._loop) return self._loop # --- Event API --- @@ -279,11 +291,7 @@ async def _create_connection(self, to: str) -> Any: return ws def _get_otel_propagation_context(self) -> dict[str, str] | None: - """Extract OTel propagation context if opentelemetry is installed.""" - try: - from opentelemetry import propagate - except ImportError: - return None + """Extract OTel propagation context.""" ctx: dict[str, str] = {} propagate.inject(ctx) result = {} diff --git a/python-client/tests/conftest.py b/python-client/tests/conftest.py index 3157c0c5..74c622dc 100644 --- a/python-client/tests/conftest.py +++ b/python-client/tests/conftest.py @@ -17,7 +17,7 @@ import pytest -from river.codec import BinaryCodec, Codec, NaiveJsonCodec +from river.codec import BinaryCodec, 
Codec TESTS_DIR = os.path.dirname(__file__) SERVER_TS = os.path.join(TESTS_DIR, "test_server.ts") @@ -183,20 +183,9 @@ def generated_client_dir() -> str: @pytest.fixture(scope="session") def river_server_port() -> Generator[int, None, None]: - """Build and start the TypeScript test server (JSON codec), yield its port.""" + """Build and start the TypeScript test server, yield its port.""" _build_test_server() - proc, port = _start_server(SERVER_MJS, "Test server") - yield port - _stop_server(proc) - - -@pytest.fixture(scope="session") -def river_binary_server_port() -> Generator[int, None, None]: - """Build and start the TypeScript test server (binary codec), yield its port.""" - _build_test_server() - proc, port = _start_server( - SERVER_MJS, "Binary test server", env={"RIVER_CODEC": "binary"} - ) + proc, port = _start_server(SERVER_MJS, "Test server", env={"RIVER_CODEC": "binary"}) yield port _stop_server(proc) @@ -222,16 +211,9 @@ def handshake_server_url(river_handshake_server_port: int) -> str: return f"ws://127.0.0.1:{river_handshake_server_port}" -@pytest.fixture(params=["json", "binary"]) +@pytest.fixture def codec_and_url( - request: pytest.FixtureRequest, river_server_port: int, - river_binary_server_port: int, ) -> tuple[Codec, str]: - """Parametrized fixture returning (codec, server_url) pairs. - - Each codec is paired with a server that speaks the same protocol. 
- """ - if request.param == "json": - return NaiveJsonCodec(), f"ws://127.0.0.1:{river_server_port}" - return BinaryCodec(), f"ws://127.0.0.1:{river_binary_server_port}" + """Return (BinaryCodec(), server_url).""" + return BinaryCodec(), f"ws://127.0.0.1:{river_server_port}" diff --git a/python-client/tests/extract_test_schema.ts b/python-client/tests/extract_test_schema.ts index 85ed26ba..ae6f00cd 100644 --- a/python-client/tests/extract_test_schema.ts +++ b/python-client/tests/extract_test_schema.ts @@ -19,6 +19,13 @@ import { Type } from '@sinclair/typebox'; const ServiceSchema = createServiceSchema(); +const RecursivePayload = Type.Recursive((This) => + Type.Object({ + value: Type.String(), + children: Type.Optional(Type.Array(This)), + }), +); + const TestServiceSchema = ServiceSchema.define({ add: Procedure.rpc({ requestInit: Type.Object({ n: Type.Number() }), @@ -63,6 +70,14 @@ const TestServiceSchema = ServiceSchema.define({ return Ok({ data: reqInit.data, length: reqInit.data.length }); }, }), + echoRecursive: Procedure.rpc({ + requestInit: RecursivePayload, + responseData: RecursivePayload, + responseError: Type.Never(), + async handler({ reqInit }) { + return Ok(reqInit); + }, + }), }); const OrderingServiceSchema = ServiceSchema.define({ diff --git a/python-client/tests/generated/_types.py b/python-client/tests/generated/_types.py index fbf15c29..fc8c8906 100644 --- a/python-client/tests/generated/_types.py +++ b/python-client/tests/generated/_types.py @@ -50,6 +50,16 @@ class TestEchoBinaryOutput(TypedDict): length: float +class TestEchoRecursiveInit(TypedDict): + value: str + children: NotRequired[list[TestEchoRecursiveInit]] + + +class TestEchoRecursiveOutput(TypedDict): + value: str + children: NotRequired[list[TestEchoRecursiveOutput]] + + class OrderingAddInit(TypedDict): n: float diff --git a/python-client/tests/generated/test_client.py b/python-client/tests/generated/test_client.py index 6b9d5a7f..c3def4ed 100644 --- 
a/python-client/tests/generated/test_client.py +++ b/python-client/tests/generated/test_client.py @@ -22,6 +22,8 @@ TestEchoInit, TestEchoInput, TestEchoOutput, + TestEchoRecursiveInit, + TestEchoRecursiveOutput, TestEchoWithPrefixInit, TestEchoWithPrefixInput, TestEchoWithPrefixOutput, @@ -87,3 +89,16 @@ async def echo_binary( init, abort_signal=abort_signal, ) + + async def echo_recursive( + self, + init: TestEchoRecursiveInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> OkResult[TestEchoRecursiveOutput] | ErrResult[ProtocolError]: + return await self._client.rpc( + "test", + "echoRecursive", + init, + abort_signal=abort_signal, + ) diff --git a/python-client/tests/test_codegen.py b/python-client/tests/test_codegen.py index b081ed37..570556b2 100644 --- a/python-client/tests/test_codegen.py +++ b/python-client/tests/test_codegen.py @@ -250,7 +250,7 @@ def _setup(self, generated_client_dir: str) -> None: async def _make_client(self, server_url: str): from river import ( - NaiveJsonCodec, + BinaryCodec, RiverClient, WebSocketClientTransport, ) @@ -259,7 +259,7 @@ async def _make_client(self, server_url: str): server_url, client_id="test-codegen-client", server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) client = RiverClient(transport, server_id="SERVER") return client, transport @@ -506,3 +506,711 @@ def test_valid_schema_passes(self): assert ref.annotation == "TestObj" td = converter._typedicts[-1] assert [f.name for f in td.fields] == ["userId", "count"] + + +# --------------------------------------------------------------------------- +# Complex type tests +# --------------------------------------------------------------------------- + + +class TestComplexTypes: + """Test codegen with complex JSON Schema types.""" + + def _convert(self, schema: dict, name: str = "Test"): + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + ref = converter._schema_to_typeref(schema, name) + return ref, 
converter._typedicts + + # -- Deeply nested objects -- + + def test_deeply_nested_objects(self): + """Objects nested 4 levels deep get path-derived names.""" + schema = { + "type": "object", + "properties": { + "level1": { + "type": "object", + "properties": { + "level2": { + "type": "object", + "properties": { + "level3": { + "type": "object", + "properties": { + "value": {"type": "string"}, + }, + "required": ["value"], + } + }, + "required": ["level3"], + } + }, + "required": ["level2"], + } + }, + "required": ["level1"], + } + ref, tds = self._convert(schema, "Root") + assert ref.annotation == "Root" + + td_names = [td.name for td in tds] + assert "Root" in td_names + assert "RootLevel1" in td_names + assert "RootLevel1Level2" in td_names + assert "RootLevel1Level2Level3" in td_names + + # Innermost TypedDict has the value field + innermost = next(td for td in tds if td.name == "RootLevel1Level2Level3") + assert len(innermost.fields) == 1 + assert innermost.fields[0].name == "value" + assert innermost.fields[0].type_ref.annotation == "str" + + def test_nested_object_in_array(self): + """Array of objects creates a TypedDict for the item type.""" + schema = { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": {"type": "number"}, + "name": {"type": "string"}, + }, + "required": ["id", "name"], + }, + } + ref, tds = self._convert(schema, "ItemList") + assert ref.annotation == "list[ItemListItem]" + assert any(td.name == "ItemListItem" for td in tds) + + def test_nested_array_of_arrays(self): + """Nested arrays: list[list[str]].""" + schema = { + "type": "array", + "items": { + "type": "array", + "items": {"type": "string"}, + }, + } + ref, _ = self._convert(schema, "Matrix") + assert ref.annotation == "list[list[str]]" + + # -- Union types (anyOf) -- + + def test_discriminated_union_with_code_field(self): + """anyOf with const code fields → named TypedDicts.""" + schema = { + "anyOf": [ + { + "type": "object", + "properties": { + "code": 
{"const": "SUCCESS"}, + "data": {"type": "string"}, + }, + "required": ["code", "data"], + }, + { + "type": "object", + "properties": { + "code": {"const": "FAILURE"}, + "reason": {"type": "string"}, + }, + "required": ["code", "reason"], + }, + ] + } + ref, tds = self._convert(schema, "Result") + assert "ResultSuccess" in ref.annotation + assert "ResultFailure" in ref.annotation + assert "|" in ref.annotation + + td_names = {td.name for td in tds} + assert "ResultSuccess" in td_names + assert "ResultFailure" in td_names + + def test_non_discriminated_union_objects(self): + """anyOf with objects but no const code → indexed variant names.""" + schema = { + "anyOf": [ + { + "type": "object", + "properties": {"x": {"type": "number"}}, + "required": ["x"], + }, + { + "type": "object", + "properties": {"y": {"type": "string"}}, + "required": ["y"], + }, + ] + } + ref, tds = self._convert(schema, "Point") + # Without code or description, should get Variant0/Variant1 + assert "PointVariant0" in ref.annotation + assert "PointVariant1" in ref.annotation + + def test_union_mixed_types_primitives_and_objects(self): + """anyOf mixing primitives and objects.""" + schema = { + "anyOf": [ + {"type": "string"}, + {"type": "number"}, + { + "type": "object", + "properties": {"value": {"type": "boolean"}}, + "required": ["value"], + }, + ] + } + ref, tds = self._convert(schema, "Mixed") + # Should include str, float, and a TypedDict + assert "str" in ref.annotation + assert "float" in ref.annotation + assert "MixedVariant2" in ref.annotation + assert any(td.name == "MixedVariant2" for td in tds) + + def test_union_with_null(self): + """anyOf with null → includes None in union.""" + schema = { + "anyOf": [ + {"type": "string"}, + {"type": "null"}, + ] + } + ref, _ = self._convert(schema, "Nullable") + assert "str" in ref.annotation + assert "None" in ref.annotation + + def test_union_primitives_only(self): + """anyOf with only primitives → no TypedDicts created.""" + schema = { + 
"anyOf": [ + {"type": "string"}, + {"type": "number"}, + {"type": "boolean"}, + ] + } + ref, tds = self._convert(schema, "Prim") + assert ref.annotation == "str | float | bool" + # No TypedDicts should be created for primitives + assert len(tds) == 0 + + def test_single_variant_anyof_unwrapped(self): + """anyOf with a single variant is unwrapped.""" + schema = { + "anyOf": [ + {"type": "string"}, + ] + } + ref, _ = self._convert(schema, "Single") + assert ref.annotation == "str" + + def test_union_with_description_variants(self): + """anyOf variants with descriptions use them for names.""" + schema = { + "anyOf": [ + { + "description": "Circle", + "type": "object", + "properties": {"radius": {"type": "number"}}, + "required": ["radius"], + }, + { + "description": "Rectangle", + "type": "object", + "properties": { + "width": {"type": "number"}, + "height": {"type": "number"}, + }, + "required": ["width", "height"], + }, + ] + } + ref, tds = self._convert(schema, "Shape") + assert "ShapeCircle" in ref.annotation + assert "ShapeRectangle" in ref.annotation + + # -- Recursive / self-referencing schemas -- + + def test_recursive_ref_with_id(self): + """$id/$ref pair → forward reference by name.""" + schema = { + "$id": "T0", + "type": "object", + "properties": { + "n": {"type": "number"}, + "next": {"$ref": "T0"}, + }, + "required": ["n"], + } + ref, tds = self._convert(schema, "TreeNode") + assert ref.annotation == "TreeNode" + td = next(td for td in tds if td.name == "TreeNode") + next_field = next(f for f in td.fields if f.name == "next") + # Should be a forward reference to itself, not Any + assert next_field.type_ref.annotation == "TreeNode" + + def test_recursive_ref_in_array(self): + """Recursive type used as array items.""" + schema = { + "$id": "Node", + "type": "object", + "properties": { + "value": {"type": "string"}, + "children": { + "type": "array", + "items": {"$ref": "Node"}, + }, + }, + "required": ["value"], + } + ref, tds = self._convert(schema, 
"TreeNode") + assert ref.annotation == "TreeNode" + td = next(td for td in tds if td.name == "TreeNode") + children_field = next(f for f in td.fields if f.name == "children") + assert children_field.type_ref.annotation == "list[TreeNode]" + + def test_unknown_ref_is_never(self): + """$ref to an unknown $id → Never (broken schema).""" + schema = {"$ref": "NonExistent"} + ref, _ = self._convert(schema, "X") + assert ref.annotation == "Never" + + def test_multiple_recursive_types(self): + """Two independent recursive types don't collide.""" + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + + schema_a = { + "$id": "A", + "type": "object", + "properties": { + "val": {"type": "number"}, + "link": {"$ref": "A"}, + }, + "required": ["val"], + } + schema_b = { + "$id": "B", + "type": "object", + "properties": { + "name": {"type": "string"}, + "parent": {"$ref": "B"}, + }, + "required": ["name"], + } + + ref_a = converter._schema_to_typeref(schema_a, "LinkedList") + ref_b = converter._schema_to_typeref(schema_b, "Category") + + assert ref_a.annotation == "LinkedList" + assert ref_b.annotation == "Category" + + tds = converter._typedicts + ll = next(td for td in tds if td.name == "LinkedList") + cat = next(td for td in tds if td.name == "Category") + + link_field = next(f for f in ll.fields if f.name == "link") + assert link_field.type_ref.annotation == "LinkedList" + + parent_field = next(f for f in cat.fields if f.name == "parent") + assert parent_field.type_ref.annotation == "Category" + + # -- Const values -- + + def test_const_string(self): + ref, _ = self._convert({"const": "hello"}, "X") + assert ref.annotation == 'Literal["hello"]' + + def test_const_number(self): + ref, _ = self._convert({"const": 42}, "X") + assert ref.annotation == "Literal[42]" + + def test_const_boolean(self): + ref, _ = self._convert({"const": True}, "X") + assert ref.annotation == "Literal[True]" + + def test_const_string_with_special_chars(self): + """Const 
strings with quotes/backslashes are properly escaped.""" + ref, _ = self._convert({"const": 'say "hello"'}, "X") + assert "Literal[" in ref.annotation + # Should be valid Python — no unescaped quotes + assert ref.annotation.count('"') % 2 == 0 or '\\"' in ref.annotation + + # -- Edge cases -- + + def test_empty_object(self): + """Object with no properties → TypedDict with pass.""" + schema = {"type": "object", "properties": {}} + ref, tds = self._convert(schema, "Empty") + assert ref.annotation == "Empty" + td = next(td for td in tds if td.name == "Empty") + assert len(td.fields) == 0 + + def test_object_all_optional_fields(self): + """Object with no required fields → all NotRequired.""" + schema = { + "type": "object", + "properties": { + "a": {"type": "string"}, + "b": {"type": "number"}, + }, + # no "required" key + } + ref, tds = self._convert(schema, "Opts") + td = next(td for td in tds if td.name == "Opts") + assert all(not f.required for f in td.fields) + + def test_object_mixed_required_optional(self): + """Object with some required, some optional fields.""" + schema = { + "type": "object", + "properties": { + "id": {"type": "number"}, + "name": {"type": "string"}, + "email": {"type": "string"}, + }, + "required": ["id"], + } + ref, tds = self._convert(schema, "User") + td = next(td for td in tds if td.name == "User") + field_map = {f.name: f for f in td.fields} + assert field_map["id"].required is True + assert field_map["name"].required is False + assert field_map["email"].required is False + + def test_unknown_type_falls_back_to_any(self): + """Unrecognized type string → Any.""" + ref, _ = self._convert({"type": "foobar"}, "X") + assert ref.annotation == "Any" + + def test_no_type_no_anyof_no_const_falls_back_to_any(self): + """Schema with no recognizable keys → Any.""" + ref, _ = self._convert({"description": "mystery"}, "X") + assert ref.annotation == "Any" + + def test_non_dict_schema_falls_back_to_any(self): + """Non-dict passed as schema → Any.""" + 
from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + ref = converter._schema_to_typeref("not a dict", "X") # type: ignore[arg-type] + assert ref.annotation == "Any" + + def test_array_with_no_items(self): + """Array with no items key → list[Any].""" + ref, _ = self._convert({"type": "array"}, "X") + assert ref.annotation == "list[Any]" + + def test_all_primitive_types(self): + """All primitive JSON Schema types map correctly.""" + cases = { + "string": "str", + "number": "float", + "integer": "int", + "boolean": "bool", + "null": "None", + "Uint8Array": "bytes", + } + for json_type, py_type in cases.items(): + ref, _ = self._convert({"type": json_type}, "X") + assert ref.annotation == py_type, f"Failed for {json_type}" + + # -- allOf (intersection) -- + + def test_allof_merges_object_properties(self): + """allOf with objects → merged TypedDict.""" + schema = { + "allOf": [ + { + "type": "object", + "properties": { + "a": {"type": "string"}, + "b": {"type": "number"}, + }, + "required": ["a", "b"], + }, + { + "type": "object", + "properties": { + "c": {"type": "boolean"}, + }, + "required": ["c"], + }, + ] + } + ref, tds = self._convert(schema, "Merged") + assert ref.annotation == "Merged" + td = next(td for td in tds if td.name == "Merged") + field_map = {f.name: f for f in td.fields} + assert field_map["a"].type_ref.annotation == "str" + assert field_map["b"].type_ref.annotation == "float" + assert field_map["c"].type_ref.annotation == "bool" + assert all(f.required for f in td.fields) + + def test_allof_with_type_object_wrapper(self): + """TypeBox emits {type: 'object', allOf: [...]} — both forms work.""" + schema = { + "type": "object", + "allOf": [ + { + "type": "object", + "properties": {"x": {"type": "number"}}, + "required": ["x"], + }, + { + "type": "object", + "properties": {"y": {"type": "number"}}, + "required": ["y"], + }, + ], + } + ref, tds = self._convert(schema, "Point") + assert ref.annotation == "Point" + td = 
next(td for td in tds if td.name == "Point") + assert {f.name for f in td.fields} == {"x", "y"} + + def test_allof_overlapping_fields(self): + """Overlapping properties in allOf → last definition wins.""" + schema = { + "allOf": [ + { + "type": "object", + "properties": { + "id": {"type": "string"}, + "name": {"type": "string"}, + }, + "required": ["id", "name"], + }, + { + "type": "object", + "properties": { + "name": {"type": "string"}, + "age": {"type": "number"}, + }, + "required": ["name"], + }, + ] + } + ref, tds = self._convert(schema, "Person") + assert ref.annotation == "Person" + td = next(td for td in tds if td.name == "Person") + field_map = {f.name: f for f in td.fields} + # "id" required from first, "name" required from both, "age" optional + assert field_map["id"].required is True + assert field_map["name"].required is True + assert field_map["age"].required is False + + def test_allof_with_nested_objects(self): + """allOf variants can contain nested objects.""" + schema = { + "allOf": [ + { + "type": "object", + "properties": { + "meta": { + "type": "object", + "properties": {"version": {"type": "number"}}, + "required": ["version"], + } + }, + "required": ["meta"], + }, + { + "type": "object", + "properties": { + "data": {"type": "string"}, + }, + "required": ["data"], + }, + ] + } + ref, tds = self._convert(schema, "Envelope") + assert ref.annotation == "Envelope" + td_names = {td.name for td in tds} + assert "Envelope" in td_names + assert "EnvelopeMeta" in td_names + + def test_allof_mixed_types_merges_objects(self): + """allOf with object + primitive → object properties still merged.""" + schema = { + "allOf": [ + { + "type": "object", + "properties": {"x": {"type": "number"}}, + "required": ["x"], + }, + {"type": "string"}, + ] + } + ref, tds = self._convert(schema, "Mixed") + # Object properties are merged; primitive constraint is ignored + assert ref.annotation == "Mixed" + td = next(td for td in tds if td.name == "Mixed") + assert 
len(td.fields) == 1 + assert td.fields[0].name == "x" + + def test_allof_only_primitives_is_never(self): + """allOf with only primitives → Never (contradictory intersection).""" + schema = { + "allOf": [ + {"type": "string"}, + {"type": "number"}, + ] + } + ref, _ = self._convert(schema, "Weird") + assert ref.annotation == "Never" + + def test_allof_empty_is_never(self): + """allOf with no variants → Never.""" + schema = {"allOf": []} + ref, _ = self._convert(schema, "Empty") + assert ref.annotation == "Never" + + # -- Full service schema with complex types -- + + def test_service_with_complex_types(self): + """Full service schema with unions, nested objects, arrays.""" + from river.codegen.schema import SchemaConverter + + raw = { + "services": { + "complex": { + "procedures": { + "transform": { + "type": "rpc", + "init": { + "type": "object", + "properties": { + "input": { + "anyOf": [ + {"type": "string"}, + {"type": "number"}, + { + "type": "object", + "properties": { + "nested": { + "type": "object", + "properties": { + "deep": {"type": "boolean"} + }, + "required": ["deep"], + } + }, + "required": ["nested"], + }, + ] + }, + "tags": { + "type": "array", + "items": { + "type": "object", + "properties": { + "key": {"type": "string"}, + "value": {"type": "string"}, + }, + "required": ["key", "value"], + }, + }, + }, + "required": ["input"], + }, + "output": { + "type": "object", + "properties": { + "result": {"type": "string"}, + }, + "required": ["result"], + }, + "errors": { + "anyOf": [ + { + "properties": { + "code": {"const": "UNCAUGHT_ERROR"}, + "message": {"type": "string"}, + }, + "required": ["code", "message"], + "type": "object", + }, + { + "properties": { + "code": {"const": "UNEXPECTED_DISCONNECT"}, + "message": {"type": "string"}, + }, + "required": ["code", "message"], + "type": "object", + }, + { + "properties": { + "code": {"const": "INVALID_REQUEST"}, + "message": {"type": "string"}, + }, + "required": ["code", "message"], + "type": "object", + 
}, + { + "properties": { + "code": {"const": "CANCEL"}, + "message": {"type": "string"}, + }, + "required": ["code", "message"], + "type": "object", + }, + { + "properties": { + "code": {"const": "TRANSFORM_FAILED"}, + "message": {"type": "string"}, + "details": { + "type": "object", + "properties": { + "field": {"type": "string"}, + "reason": {"type": "string"}, + }, + "required": ["field", "reason"], + }, + }, + "required": ["code", "message"], + "type": "object", + }, + ] + }, + } + } + } + } + } + + converter = SchemaConverter() + ir = converter.convert(raw) + + assert len(ir.services) == 1 + svc = ir.services[0] + assert svc.name == "complex" + assert len(svc.procedures) == 1 + + proc = svc.procedures[0] + assert proc.name == "transform" + assert proc.py_name == "transform" + + # Init should have created TypedDicts for nested objects + td_names = {td.name for td in ir.typedicts} + assert "ComplexTransformInit" in td_names + assert "ComplexTransformOutput" in td_names + + # The service error should be extracted (TRANSFORM_FAILED is the + # only non-protocol error, so it gets the unsuffixed name) + assert proc.error_type is not None + assert proc.error_type.annotation == "ComplexTransformError" + + # The union input field → str | float | TypedDict + init_td = next(td for td in ir.typedicts if td.name == "ComplexTransformInit") + input_field = next(f for f in init_td.fields if f.name == "input") + assert "str" in input_field.type_ref.annotation + assert "float" in input_field.type_ref.annotation + + # Tags array of objects + tags_field = next((f for f in init_td.fields if f.name == "tags"), None) + assert tags_field is not None + assert "list[" in tags_field.type_ref.annotation diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index 3ba2df6e..9f373b6b 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -11,7 +11,7 @@ import pytest from river.client import RiverClient -from river.codec import 
NaiveJsonCodec +from river.codec import BinaryCodec from river.transport import WebSocketClientTransport from tests.test_utils import wait_for_connected @@ -24,7 +24,7 @@ async def make_client(server_url: str, **kwargs) -> RiverClient: ws_url=server_url, client_id=None, # auto-generate server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) return RiverClient( transport, @@ -563,7 +563,7 @@ async def test_eagerly_connect(self, server_url: str): transport = WebSocketClientTransport( ws_url=server_url, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) client = RiverClient(transport, server_id="SERVER", eagerly_connect=True) try: @@ -588,7 +588,7 @@ async def test_reconnect_with_concurrent_streams(self, server_url: str): transport = WebSocketClientTransport( ws_url=server_url, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) client = RiverClient(transport, server_id="SERVER") try: @@ -655,21 +655,6 @@ async def test_reconnect_with_concurrent_streams(self, server_url: str): class TestCodec: - @pytest.mark.asyncio - async def test_json_codec_rpc(self, server_url: str): - """JSON codec works for basic RPC.""" - transport = WebSocketClientTransport( - ws_url=server_url, - server_id="SERVER", - codec=NaiveJsonCodec(), - ) - client = RiverClient(transport, server_id="SERVER") - try: - result = await client.rpc("test", "add", {"n": 5}) - assert result["ok"] is True - finally: - await transport.close() - @pytest.mark.asyncio async def test_binary_codec_roundtrip(self): """Binary (msgpack) codec encodes and decodes transport messages.""" @@ -1213,27 +1198,6 @@ async def test_break_ends_iteration_midstream(self): class TestCodecUnit: - def test_json_codec_encode_decode(self): - """JSON codec round-trips correctly.""" - from river.codec import NaiveJsonCodec - - codec = NaiveJsonCodec() - obj = {"key": "value", "num": 42, "nested": {"a": [1, 2, 3]}} - buf = codec.to_buffer(obj) - assert isinstance(buf, bytes) - result = 
codec.from_buffer(buf) - assert result == obj - - def test_json_codec_bytes_handling(self): - """JSON codec handles bytes via base64.""" - from river.codec import NaiveJsonCodec - - codec = NaiveJsonCodec() - obj = {"data": b"\x00\x01\x02\xff"} - buf = codec.to_buffer(obj) - result = codec.from_buffer(buf) - assert result["data"] == b"\x00\x01\x02\xff" - def test_binary_codec_encode_decode(self): """Binary (msgpack) codec round-trips correctly.""" from river.codec import BinaryCodec @@ -1247,10 +1211,10 @@ def test_binary_codec_encode_decode(self): def test_codec_adapter_valid(self): """CodecMessageAdapter encodes and decodes transport messages.""" - from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.codec import BinaryCodec, CodecMessageAdapter from river.types import TransportMessage - adapter = CodecMessageAdapter(NaiveJsonCodec()) + adapter = CodecMessageAdapter(BinaryCodec()) msg = TransportMessage( id="abc", from_="c1", @@ -1271,13 +1235,54 @@ def test_codec_adapter_valid(self): def test_codec_adapter_invalid_buffer(self): """CodecMessageAdapter returns error on invalid bytes.""" - from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.codec import BinaryCodec, CodecMessageAdapter - adapter = CodecMessageAdapter(NaiveJsonCodec()) + adapter = CodecMessageAdapter(BinaryCodec()) ok, result = adapter.from_buffer(b"not valid json") assert ok is False assert isinstance(result, str) + def test_codec_adapter_rejects_wrong_seq_type(self): + """CodecMessageAdapter rejects seq that is not an int.""" + from river.codec import BinaryCodec, CodecMessageAdapter + + adapter = CodecMessageAdapter(BinaryCodec()) + raw = BinaryCodec().to_buffer( + { + "id": "m1", + "from": "s", + "to": "c", + "seq": "0", # wrong type + "ack": 0, + "payload": {}, + "streamId": "st1", + } + ) + ok, result = adapter.from_buffer(raw) + assert ok is False + assert "seq" in result + assert "str" in result + + def test_codec_adapter_rejects_wrong_ack_type(self): 
+ """CodecMessageAdapter rejects ack that is not an int.""" + from river.codec import BinaryCodec, CodecMessageAdapter + + adapter = CodecMessageAdapter(BinaryCodec()) + raw = BinaryCodec().to_buffer( + { + "id": "m1", + "from": "s", + "to": "c", + "seq": 0, + "ack": "0", # wrong type + "payload": {}, + "streamId": "st1", + } + ) + ok, result = adapter.from_buffer(raw) + assert ok is False + assert "ack" in result + # ===================================================================== # Lifecycle / Cleanup Tests @@ -1293,7 +1298,7 @@ async def test_cancel_cleans_up_listeners(self, server_url: str): transport = WebSocketClientTransport( ws_url=server_url, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) client = RiverClient(transport, server_id="SERVER") try: @@ -1319,7 +1324,7 @@ async def test_repeated_cancels_do_not_leak(self, server_url: str): transport = WebSocketClientTransport( ws_url=server_url, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) client = RiverClient(transport, server_id="SERVER") try: @@ -1343,7 +1348,7 @@ async def test_abort_task_cancelled_on_normal_close(self, server_url: str): transport = WebSocketClientTransport( ws_url=server_url, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) client = RiverClient(transport, server_id="SERVER") try: @@ -1412,10 +1417,10 @@ def test_handshake_stream_id_is_random(self): The protocol requires a random streamId for handshakes. 
""" - from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.codec import BinaryCodec, CodecMessageAdapter from river.session import Session - codec = CodecMessageAdapter(NaiveJsonCodec()) + codec = CodecMessageAdapter(BinaryCodec()) s1 = Session("sess1", "client", "server", codec) s2 = Session("sess2", "client", "server", codec) @@ -1514,7 +1519,7 @@ def test_failed_send_destroys_session(self): """Send failure on a connected session destroys it.""" from unittest.mock import AsyncMock - from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.codec import BinaryCodec, CodecMessageAdapter from river.session import Session, SessionState from river.transport import WebSocketClientTransport @@ -1522,9 +1527,9 @@ def test_failed_send_destroys_session(self): ws_url="ws://127.0.0.1:1", client_id="client", server_id="server", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) - codec = CodecMessageAdapter(NaiveJsonCodec()) + codec = CodecMessageAdapter(BinaryCodec()) session = Session("s1", "client", "server", codec) session.state = SessionState.CONNECTED session._ws = AsyncMock() @@ -1556,11 +1561,11 @@ def test_failed_send_seq_consumed(self): """ from unittest.mock import AsyncMock - from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.codec import BinaryCodec, CodecMessageAdapter from river.session import Session, SessionState from river.types import PartialTransportMessage - codec = CodecMessageAdapter(NaiveJsonCodec()) + codec = CodecMessageAdapter(BinaryCodec()) session = Session("s1", "client", "server", codec) session.state = SessionState.CONNECTED session._ws = AsyncMock() @@ -1581,7 +1586,7 @@ def test_failed_send_seq_consumed(self): def test_invalid_message_destroys_session(self): """Receiving a corrupt message destroys the session.""" - from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.codec import BinaryCodec, CodecMessageAdapter from river.session import Session, SessionState from 
river.transport import WebSocketClientTransport @@ -1589,9 +1594,9 @@ def test_invalid_message_destroys_session(self): ws_url="ws://127.0.0.1:1", client_id="client", server_id="server", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) - codec = CodecMessageAdapter(NaiveJsonCodec()) + codec = CodecMessageAdapter(BinaryCodec()) session = Session("s1", "client", "server", codec) session.state = SessionState.CONNECTED transport.sessions["server"] = session @@ -1636,31 +1641,6 @@ def test_frozen_session_options(self): except AttributeError: pass # frozen dataclass raises AttributeError on mutation - def test_json_codec_large_int_encoding(self): - """Large ints beyond JS safe integer range are encoded as $b.""" - from river.codec import NaiveJsonCodec - - codec = NaiveJsonCodec() - large = 2**53 + 1 - buf = codec.to_buffer({"n": large}) - decoded = codec.from_buffer(buf) - assert decoded["n"] == large - - # Normal ints should NOT be encoded as $b - buf2 = codec.to_buffer({"n": 42}) - raw = buf2.decode("utf-8") - assert "$b" not in raw - - def test_json_codec_negative_large_int(self): - """Negative large ints are also encoded as $b.""" - from river.codec import NaiveJsonCodec - - codec = NaiveJsonCodec() - large_neg = -(2**53 + 1) - buf = codec.to_buffer({"n": large_neg}) - decoded = codec.from_buffer(buf) - assert decoded["n"] == large_neg - def test_binary_codec_large_int(self): """Binary codec handles ints beyond msgpack native range.""" from river.codec import BinaryCodec @@ -1724,7 +1704,7 @@ async def test_connection_failed_starts_grace_period(self, server_url: str): ws_url="ws://127.0.0.1:1", # unreachable client_id=None, server_id="UNREACHABLE", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), options=SessionOptions( connection_timeout_ms=100, session_disconnect_grace_ms=300, @@ -1747,7 +1727,7 @@ def test_enable_transparent_reconnects_option(self): ws_url="ws://127.0.0.1:1", client_id=None, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), 
options=opts, ) assert transport.reconnect_on_connection_drop is False @@ -1792,7 +1772,7 @@ async def test_close_cancels_inflight_connect(self, server_url: str): ws_url=server_url, client_id=None, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) transport.connect("SERVER") # Let connection start but don't wait for completion @@ -1811,14 +1791,14 @@ class TestOtelTracingPropagation: def test_handshake_includes_tracing_when_otel_available(self): """Handshake message includes tracing when OTel propagation is configured.""" - from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.codec import BinaryCodec, CodecMessageAdapter from river.session import Session session = Session( session_id="test-session", from_id="client", to_id="server", - codec=CodecMessageAdapter(NaiveJsonCodec()), + codec=CodecMessageAdapter(BinaryCodec()), ) tracing = { @@ -1832,14 +1812,14 @@ def test_handshake_includes_tracing_when_otel_available(self): def test_handshake_omits_tracing_when_none(self): """Handshake message omits tracing when not provided.""" - from river.codec import CodecMessageAdapter, NaiveJsonCodec + from river.codec import BinaryCodec, CodecMessageAdapter from river.session import Session session = Session( session_id="test-session", from_id="client", to_id="server", - codec=CodecMessageAdapter(NaiveJsonCodec()), + codec=CodecMessageAdapter(BinaryCodec()), ) msg = session.create_handshake_request() @@ -1849,29 +1829,22 @@ def test_handshake_omits_tracing_when_none(self): def test_get_otel_propagation_context_with_mock(self): """_get_otel_propagation_context extracts traceparent/tracestate.""" - import types from unittest.mock import patch transport = WebSocketClientTransport( ws_url="ws://localhost:0", client_id="test", server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) def fake_inject(carrier): carrier["traceparent"] = "00-tid-sid-01" carrier["tracestate"] = "k=v" - # Create a fake opentelemetry module with a 
propagate submodule - fake_otel = types.ModuleType("opentelemetry") - fake_propagate = types.ModuleType("opentelemetry.propagate") - fake_propagate.inject = fake_inject # type: ignore[attr-defined] - fake_otel.propagate = fake_propagate # type: ignore[attr-defined] - - with patch.dict( - "sys.modules", - {"opentelemetry": fake_otel, "opentelemetry.propagate": fake_propagate}, + with patch( + "river.transport.propagate.inject", + side_effect=fake_inject, ): result = transport._get_otel_propagation_context() @@ -1880,19 +1853,73 @@ def fake_inject(carrier): "tracestate": "k=v", } - def test_get_otel_propagation_context_without_otel(self): - """_get_otel_propagation_context returns None when OTel is not installed.""" - from unittest.mock import patch +# ===================================================================== +# Eager connect in sync context +# ===================================================================== + + +class TestEagerConnectSync: + def test_eager_connect_does_not_raise_outside_loop(self): + """Constructing with eagerly_connect=True outside an event loop + should not raise RuntimeError.""" transport = WebSocketClientTransport( - ws_url="ws://localhost:0", - client_id="test", + ws_url="ws://127.0.0.1:1", server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), ) + # This used to raise "no running event loop" + RiverClient(transport, server_id="SERVER", eagerly_connect=True) - # Ensure opentelemetry is not importable - with patch.dict("sys.modules", {"opentelemetry": None}): - result = transport._get_otel_propagation_context() - assert result is None +# ===================================================================== +# Cancel frame validation +# ===================================================================== + + +class TestCancelFrameValidation: + @pytest.mark.asyncio + async def test_server_cancel_always_error_shaped(self, server_url: str): + """Server-initiated cancel always yields an error result.""" + client = 
await make_client(server_url) + try: + # cancellableAdd cancels when total >= 10 + upload = client.upload("uploadable", "cancellableAdd", {}) + upload.req_writable.write({"n": 15}) + # Server sends cancel with Err payload + result = await asyncio.wait_for(upload.finalize(), timeout=5.0) + assert result["ok"] is False + assert result["payload"]["code"] == "CANCEL" + finally: + await cleanup_client(client) + + def test_cancel_frame_ok_true_forced_to_error(self): + """A cancel payload with ok:true is coerced to error shape.""" + from river.types import ControlFlags, err_result + + # Simulate what on_message does with a cancel frame + payload = {"ok": True, "payload": {"unexpected": "success"}} + flags = ControlFlags.StreamCancelBit + + # The fix: cancel frames with ok:True get forced to err_result + from river.types import is_stream_cancel + + assert is_stream_cancel(flags) + # After the fix, the code checks `not payload["ok"]` — so + # ok:True falls through to the error branch + if isinstance(payload, dict) and "ok" in payload and not payload["ok"]: + result = payload + else: + code = ( + payload.get("code", "UNKNOWN") + if isinstance(payload, dict) + else "UNKNOWN" + ) + message = ( + payload.get("message", str(payload)) + if isinstance(payload, dict) + else str(payload) + ) + result = err_result(code, message) + + assert result["ok"] is False diff --git a/python-client/tests/test_equivalence.py b/python-client/tests/test_equivalence.py index 1392c235..d1dc27bf 100644 --- a/python-client/tests/test_equivalence.py +++ b/python-client/tests/test_equivalence.py @@ -1,8 +1,6 @@ -"""Cross-codec parametrized equivalence tests. +"""Equivalence tests for the River Python client. -Every test in this module runs against both NaiveJsonCodec and BinaryCodec, -proving that both codecs produce identical behavior against the TS server. -Each codec is paired with a matching server (JSON or binary). +Tests all procedure types against the TS test server using BinaryCodec. 
""" from __future__ import annotations diff --git a/python-client/tests/test_handshake.py b/python-client/tests/test_handshake.py index 978afe53..8c1b831a 100644 --- a/python-client/tests/test_handshake.py +++ b/python-client/tests/test_handshake.py @@ -9,7 +9,7 @@ import pytest from river.client import RiverClient -from river.codec import NaiveJsonCodec +from river.codec import BinaryCodec from river.transport import WebSocketClientTransport from tests.test_utils import wait_for_connected, wait_for_event @@ -22,7 +22,7 @@ async def make_handshake_client( ws_url=server_url, client_id=None, server_id="HANDSHAKE_SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), handshake_metadata=handshake_metadata, ) return RiverClient( @@ -61,7 +61,7 @@ async def test_handshake_with_invalid_metadata_emits_error( ws_url=handshake_server_url, client_id=None, server_id="HANDSHAKE_SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), handshake_metadata={"token": "wrong-token"}, ) try: @@ -83,7 +83,7 @@ async def test_handshake_with_missing_metadata_emits_error( ws_url=handshake_server_url, client_id=None, server_id="HANDSHAKE_SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), handshake_metadata=None, ) try: diff --git a/python-client/tests/test_schema.json b/python-client/tests/test_schema.json index b02ac0c0..96d0f025 100644 --- a/python-client/tests/test_schema.json +++ b/python-client/tests/test_schema.json @@ -518,6 +518,143 @@ ] }, "type": "rpc" + }, + "echoRecursive": { + "init": { + "$id": "T0", + "type": "object", + "properties": { + "value": { + "type": "string" + }, + "children": { + "type": "array", + "items": { + "$ref": "T0" + } + } + }, + "required": [ + "value" + ] + }, + "output": { + "$id": "T0", + "type": "object", + "properties": { + "value": { + "type": "string" + }, + "children": { + "type": "array", + "items": { + "$ref": "T0" + } + } + }, + "required": [ + "value" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + 
"code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "rpc" } } }, diff --git a/python-client/tests/test_server.ts b/python-client/tests/test_server.ts index 906261f6..924ba3cb 100644 --- a/python-client/tests/test_server.ts +++ b/python-client/tests/test_server.ts @@ -23,6 +23,13 @@ import { NaiveJsonCodec } from '../../codec/json'; const ServiceSchema = createServiceSchema(); +const RecursivePayload = Type.Recursive((This) => + Type.Object({ + value: Type.String(), + children: Type.Optional(Type.Array(This)), + }), +); + // ------------------------------------------------------------------- // TestService – mirrors the TS TestServiceSchema // ------------------------------------------------------------------- @@ -86,6 +93,14 @@ const TestServiceSchema = ServiceSchema.define({ return Ok({ data: reqInit.data, length: reqInit.data.length }); }, }), + echoRecursive: Procedure.rpc({ + 
requestInit: RecursivePayload, + responseData: RecursivePayload, + responseError: Type.Never(), + async handler({ reqInit }) { + return Ok(reqInit); + }, + }), }); // ------------------------------------------------------------------- diff --git a/python-client/tests/test_server_handshake.ts b/python-client/tests/test_server_handshake.ts index c20b024a..62d712aa 100644 --- a/python-client/tests/test_server_handshake.ts +++ b/python-client/tests/test_server_handshake.ts @@ -10,6 +10,7 @@ import { WebSocketServerTransport } from '../../transport/impls/ws/server'; import { createServer, createServiceSchema, Procedure, Ok } from '../../router'; import { createServerHandshakeOptions } from '../../router/handshake'; import { Type } from '@sinclair/typebox'; +import { BinaryCodec } from '../../codec/binary'; const ServiceSchema = createServiceSchema(); @@ -44,6 +45,7 @@ async function main() { const serverTransport = new WebSocketServerTransport( wss, 'HANDSHAKE_SERVER', + { codec: BinaryCodec }, ); const _server = createServer(serverTransport, services, { handshakeOptions: createServerHandshakeOptions( diff --git a/python-client/tests/test_session.py b/python-client/tests/test_session.py index bfa3a725..5a7ed61e 100644 --- a/python-client/tests/test_session.py +++ b/python-client/tests/test_session.py @@ -11,7 +11,7 @@ import pytest from river.client import RiverClient -from river.codec import NaiveJsonCodec +from river.codec import BinaryCodec from river.session import SessionOptions, SessionState from river.transport import WebSocketClientTransport from tests.test_utils import ( @@ -38,7 +38,7 @@ async def make_client( ws_url=server_url, client_id=None, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), options=options or SHORT_OPTIONS, ) return RiverClient( @@ -159,7 +159,7 @@ async def test_backoff_increases_on_failures(self, server_url: str): ws_url="ws://127.0.0.1:1", # intentionally invalid client_id=None, server_id="INVALID", - 
codec=NaiveJsonCodec(), + codec=BinaryCodec(), options=SessionOptions( connection_timeout_ms=200, handshake_timeout_ms=200, @@ -210,7 +210,7 @@ async def test_rpc_gets_disconnect_on_grace_expiry(self, server_url: str): ws_url=server_url, client_id=None, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), options=SHORT_OPTIONS, ) client = RiverClient( @@ -358,7 +358,7 @@ async def test_connect_on_invoke_false_no_reconnect(self, server_url: str): ws_url=server_url, client_id=None, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), options=SHORT_OPTIONS, ) client = RiverClient( @@ -411,7 +411,7 @@ async def test_done_connect_task_does_not_block_failfast(self): ws_url="ws://127.0.0.1:1", # unreachable client_id=None, server_id="STALE", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), options=SessionOptions( connection_timeout_ms=100, handshake_timeout_ms=100, @@ -464,7 +464,7 @@ async def test_grace_period_not_extended_by_retries(self, server_url: str): ws_url="ws://127.0.0.1:1", # unreachable client_id=None, server_id="GRACE", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), options=SessionOptions( connection_timeout_ms=100, handshake_timeout_ms=100, @@ -518,7 +518,7 @@ async def test_failfast_notifies_existing_streams(self, server_url: str): ws_url=server_url, client_id=None, server_id="SERVER", - codec=NaiveJsonCodec(), + codec=BinaryCodec(), options=SHORT_OPTIONS, ) client = RiverClient( From 50d8f454b6983b4004819a233a51fcffd07891ec Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Tue, 3 Mar 2026 00:08:37 -0800 Subject: [PATCH 22/29] fix --- .github/release-drafter-python.yml | 37 ++++++++++ .github/workflows/publish-python.yml | 47 ++++++++++++ .github/workflows/release-drafter-python.yml | 33 +++++++++ python-client/river/codec.py | 27 +------ python-client/river/codegen/schema.py | 18 +++++ python-client/river/transport.py | 12 +-- python-client/river/types.py | 39 +++++++++- python-client/tests/test_codegen.py | 77 
++++++++++++++++++++ python-client/tests/test_e2e.py | 8 +- 9 files changed, 258 insertions(+), 40 deletions(-) create mode 100644 .github/release-drafter-python.yml create mode 100644 .github/workflows/publish-python.yml create mode 100644 .github/workflows/release-drafter-python.yml diff --git a/.github/release-drafter-python.yml b/.github/release-drafter-python.yml new file mode 100644 index 00000000..fa7c4949 --- /dev/null +++ b/.github/release-drafter-python.yml @@ -0,0 +1,37 @@ +name-template: 'river-client-py/v$RESOLVED_VERSION' +tag-template: 'river-client-py/v$RESOLVED_VERSION' +filter-by-commitish: true +include-paths: + - 'python-client/' +categories: + - title: '🚀 Features' + labels: + - 'feature' + - 'enhancement' + - 'python' + - title: '🐛 Bug Fixes' + labels: + - 'fix' + - 'bugfix' + - 'bug' + - title: '🧰 Maintenance' + label: 'chore' + - title: '🤖 Dependencies' + label: 'dependencies' +change-template: '- $TITLE @$AUTHOR (#$NUMBER)' +change-title-escapes: '\<*_&' +version-resolver: + major: + labels: + - 'major' + minor: + labels: + - 'minor' + patch: + labels: + - 'patch' + default: patch +template: | + ## Changes + + $CHANGES diff --git a/.github/workflows/publish-python.yml b/.github/workflows/publish-python.yml new file mode 100644 index 00000000..585e794c --- /dev/null +++ b/.github/workflows/publish-python.yml @@ -0,0 +1,47 @@ +name: Build and Publish Python Package + +on: + release: + types: [published] + +jobs: + build-and-publish: + # Only run for Python releases + if: startsWith(github.event.release.tag_name, 'river-client-py/') + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v3 + with: + enable-cache: true + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Check if version already published + working-directory: python-client + id: check + run: | + version=$(python -c " + import tomllib + with open('pyproject.toml', 'rb') 
as f: + print(tomllib.load(f)['project']['version']) + ") + echo "version=$version" >> "$GITHUB_OUTPUT" + if uv pip install --dry-run "river-client==$version" 2>/dev/null; then + echo "skip=true" >> "$GITHUB_OUTPUT" + else + echo "skip=false" >> "$GITHUB_OUTPUT" + fi + + - name: Build and publish + if: steps.check.outputs.skip == 'false' + working-directory: python-client + run: | + uv build + UV_PUBLISH_TOKEN="${{ secrets.PYPI_TOKEN }}" \ + uv publish diff --git a/.github/workflows/release-drafter-python.yml b/.github/workflows/release-drafter-python.yml new file mode 100644 index 00000000..937bceed --- /dev/null +++ b/.github/workflows/release-drafter-python.yml @@ -0,0 +1,33 @@ +name: Release Drafter (Python) + +on: + workflow_dispatch: {} + push: + branches: + - main + paths: + - 'python-client/**' + pull_request: + types: [opened, reopened, synchronize] + paths: + - 'python-client/**' + pull_request_target: + types: [opened, reopened, synchronize] + paths: + - 'python-client/**' + +permissions: + contents: read + +jobs: + update_release_draft: + permissions: + contents: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: release-drafter/release-drafter@v5 + with: + config-name: release-drafter-python.yml + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/python-client/river/codec.py b/python-client/river/codec.py index 53cc966d..579b518d 100644 --- a/python-client/river/codec.py +++ b/python-client/river/codec.py @@ -84,35 +84,16 @@ def from_buffer(self, buf: bytes) -> tuple[bool, TransportMessage | str]: """Deserialize bytes to a TransportMessage. Returns (True, TransportMessage) on success, (False, error_reason) on failure. + Validation of required fields and types is handled by + :meth:`TransportMessage.from_dict`. 
""" try: raw = self._codec.from_buffer(buf) if not isinstance(raw, dict): return False, f"Expected dict, got {type(raw).__name__}" - # Validate required fields - required = ("id", "from", "to", "seq", "ack", "payload", "streamId") - for f in required: - if f not in raw: - return False, f"Missing required field: {f}" - # Validate field types to prevent downstream crashes - if not isinstance(raw["seq"], int): - return False, ( - f"Field 'seq' must be int, got {type(raw['seq']).__name__}" - ) - if not isinstance(raw["ack"], int): - return False, ( - f"Field 'ack' must be int, got {type(raw['ack']).__name__}" - ) - if not isinstance(raw["id"], str): - return False, ( - f"Field 'id' must be str, got {type(raw['id']).__name__}" - ) - if not isinstance(raw["streamId"], str): - return False, ( - f"Field 'streamId' must be str, " - f"got {type(raw['streamId']).__name__}" - ) msg = TransportMessage.from_dict(raw) return True, msg + except (KeyError, TypeError) as e: + return False, str(e) except Exception as e: return False, f"Failed to deserialize message: {e}" diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index 04c6e6f7..5161f575 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -163,7 +163,16 @@ def convert(self, raw: dict) -> SchemaIR: self._typedicts = [] self._id_to_name = {} services: list[ServiceDef] = [] + seen_modules: dict[str, str] = {} # sanitized name → wire name for svc_name, svc_data in raw.get("services", {}).items(): + module_name = _sanitize_identifier(svc_name) + if module_name in seen_modules: + raise ValueError( + f"services {seen_modules[module_name]!r} and " + f"{svc_name!r} both map to Python module " + f"{module_name!r}_client.py" + ) + seen_modules[module_name] = svc_name svc_def = self._convert_service(svc_name, svc_data) services.append(svc_def) @@ -172,8 +181,17 @@ def convert(self, raw: dict) -> SchemaIR: def _convert_service(self, name: str, data: 
dict) -> ServiceDef: class_name = _to_pascal_case(name) procedures: list[ProcedureDef] = [] + seen_py_names: dict[str, str] = {} # py_name → wire name for proc_name, proc_data in data.get("procedures", {}).items(): proc_def = self._convert_procedure(class_name, proc_name, proc_data) + if proc_def.py_name in seen_py_names: + raise ValueError( + f"service {name!r}: procedures " + f"{seen_py_names[proc_def.py_name]!r} and " + f"{proc_name!r} both map to Python method " + f"{proc_def.py_name!r}" + ) + seen_py_names[proc_def.py_name] = proc_name procedures.append(proc_def) return ServiceDef( name=name, diff --git a/python-client/river/transport.py b/python-client/river/transport.py index 4c6d0932..6ac8588e 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -157,17 +157,7 @@ def get_status(self) -> str: def _get_loop(self) -> asyncio.AbstractEventLoop: if self._loop is None: - try: - self._loop = asyncio.get_running_loop() - except RuntimeError: - try: - loop = asyncio.get_event_loop() - if loop.is_closed(): - raise RuntimeError("closed") - self._loop = loop - except RuntimeError: - self._loop = asyncio.new_event_loop() - asyncio.set_event_loop(self._loop) + self._loop = asyncio.get_running_loop() return self._loop # --- Event API --- diff --git a/python-client/river/types.py b/python-client/river/types.py index 2e81f64c..af193089 100644 --- a/python-client/river/types.py +++ b/python-client/river/types.py @@ -88,7 +88,42 @@ def to_dict(self) -> dict[str, Any]: @classmethod def from_dict(cls, d: dict[str, Any]) -> TransportMessage: - """Deserialize from a wire format dict.""" + """Deserialize from a wire format dict. + + Raises ``TypeError`` if required fields have wrong types. 
+ """ + required_str = {"id": "id", "from": "from", "streamId": "streamId"} + for wire_key, label in required_str.items(): + if wire_key not in d: + raise KeyError(f"Missing required field: {label}") + if not isinstance(d[wire_key], str): + raise TypeError( + f"Field '{label}' must be str, " + f"got {type(d[wire_key]).__name__}" + ) + + required_int = {"seq": "seq", "ack": "ack"} + for wire_key, label in required_int.items(): + if wire_key not in d: + raise KeyError(f"Missing required field: {label}") + if not isinstance(d[wire_key], int): + raise TypeError( + f"Field '{label}' must be int, " + f"got {type(d[wire_key]).__name__}" + ) + + if "to" not in d: + raise KeyError("Missing required field: to") + if "payload" not in d: + raise KeyError("Missing required field: payload") + + control_flags = d.get("controlFlags", 0) + if not isinstance(control_flags, int): + raise TypeError( + f"Field 'controlFlags' must be int, " + f"got {type(control_flags).__name__}" + ) + return cls( id=d["id"], from_=d["from"], @@ -97,7 +132,7 @@ def from_dict(cls, d: dict[str, Any]) -> TransportMessage: ack=d["ack"], payload=d["payload"], stream_id=d["streamId"], - control_flags=d.get("controlFlags", 0), + control_flags=control_flags, service_name=d.get("serviceName"), procedure_name=d.get("procedureName"), tracing=d.get("tracing"), diff --git a/python-client/tests/test_codegen.py b/python-client/tests/test_codegen.py index 570556b2..ac2f9088 100644 --- a/python-client/tests/test_codegen.py +++ b/python-client/tests/test_codegen.py @@ -508,6 +508,83 @@ def test_valid_schema_passes(self): assert [f.name for f in td.fields] == ["userId", "count"] +class TestNameCollisions: + """Codegen detects and rejects name collisions.""" + + def test_procedure_name_collision_raises(self): + """Two procedures that map to the same snake_case name are rejected.""" + from river.codegen.schema import SchemaConverter + + raw = { + "services": { + "svc": { + "procedures": { + "fooBar": { + "type": "rpc", + 
"init": {"type": "object", "properties": {}}, + "output": {"type": "object", "properties": {}}, + }, + "foo_bar": { + "type": "rpc", + "init": {"type": "object", "properties": {}}, + "output": {"type": "object", "properties": {}}, + }, + } + } + } + } + converter = SchemaConverter() + with pytest.raises(ValueError, match="foo_bar"): + converter.convert(raw) + + def test_service_module_collision_raises(self): + """Two services that map to the same module name are rejected.""" + from river.codegen.schema import SchemaConverter + + raw = { + "services": { + "foo-bar": { + "procedures": {}, + }, + "foo_bar": { + "procedures": {}, + }, + } + } + converter = SchemaConverter() + with pytest.raises(ValueError, match="foo_bar"): + converter.convert(raw) + + def test_no_collision_passes(self): + """Distinct names that don't collide work fine.""" + from river.codegen.schema import SchemaConverter + + raw = { + "services": { + "alpha": { + "procedures": { + "doX": { + "type": "rpc", + "init": {"type": "object", "properties": {}}, + "output": {"type": "object", "properties": {}}, + }, + "doY": { + "type": "rpc", + "init": {"type": "object", "properties": {}}, + "output": {"type": "object", "properties": {}}, + }, + } + }, + "beta": { + "procedures": {}, + }, + } + } + converter = SchemaConverter() + ir = converter.convert(raw) + assert len(ir.services) == 2 + + # --------------------------------------------------------------------------- # Complex type tests # --------------------------------------------------------------------------- diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index 9f373b6b..586abc95 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -1860,16 +1860,16 @@ def fake_inject(carrier): class TestEagerConnectSync: - def test_eager_connect_does_not_raise_outside_loop(self): + def test_eager_connect_raises_outside_loop(self): """Constructing with eagerly_connect=True outside an event loop - should 
not raise RuntimeError.""" + raises RuntimeError rather than silently binding to a dead loop.""" transport = WebSocketClientTransport( ws_url="ws://127.0.0.1:1", server_id="SERVER", codec=BinaryCodec(), ) - # This used to raise "no running event loop" - RiverClient(transport, server_id="SERVER", eagerly_connect=True) + with pytest.raises(RuntimeError, match="no running event loop"): + RiverClient(transport, server_id="SERVER", eagerly_connect=True) # ===================================================================== From e33670572be916cd67cd466e8b30e4998bb8d2f4 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Tue, 3 Mar 2026 00:22:50 -0800 Subject: [PATCH 23/29] fix --- .github/workflows/release-drafter-python.yml | 6 ----- python-client/river/codegen/emitter.py | 8 +++++++ python-client/river/codegen/schema.py | 11 +++++++++ .../codegen/templates/service_client.py.j2 | 8 +++---- .../river/codegen/templates/types.py.j2 | 2 +- python-client/river/types.py | 23 ++++++++++--------- python-client/tests/test_codegen.py | 21 +++++++++++++++++ 7 files changed, 57 insertions(+), 22 deletions(-) diff --git a/.github/workflows/release-drafter-python.yml b/.github/workflows/release-drafter-python.yml index 937bceed..746dcca2 100644 --- a/.github/workflows/release-drafter-python.yml +++ b/.github/workflows/release-drafter-python.yml @@ -5,16 +5,10 @@ on: push: branches: - main - paths: - - 'python-client/**' pull_request: types: [opened, reopened, synchronize] - paths: - - 'python-client/**' pull_request_target: types: [opened, reopened, synchronize] - paths: - - 'python-client/**' permissions: contents: read diff --git a/python-client/river/codegen/emitter.py b/python-client/river/codegen/emitter.py index 6bb7e49a..94ca0c93 100644 --- a/python-client/river/codegen/emitter.py +++ b/python-client/river/codegen/emitter.py @@ -29,6 +29,14 @@ _env.filters["pascal"] = _to_pascal_case +def _escape_docstring(s: str) -> str: + """Escape a string for use inside triple-quoted 
docstrings.""" + return s.replace("\\", "\\\\").replace('"""', r"\"\"\"") + + +_env.filters["docstring"] = _escape_docstring + + def _result_type(proc) -> str: # noqa: ANN001 """Build the typed result annotation for a procedure.""" ok = f"OkResult[{proc.output_type.annotation}]" diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index 5161f575..b2613578 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -164,6 +164,7 @@ def convert(self, raw: dict) -> SchemaIR: self._id_to_name = {} services: list[ServiceDef] = [] seen_modules: dict[str, str] = {} # sanitized name → wire name + seen_classes: dict[str, str] = {} # class name → wire name for svc_name, svc_data in raw.get("services", {}).items(): module_name = _sanitize_identifier(svc_name) if module_name in seen_modules: @@ -173,6 +174,16 @@ def convert(self, raw: dict) -> SchemaIR: f"{module_name!r}_client.py" ) seen_modules[module_name] = svc_name + + class_name = _to_pascal_case(svc_name) + "Client" + if class_name in seen_classes: + raise ValueError( + f"services {seen_classes[class_name]!r} and " + f"{svc_name!r} both map to Python class " + f"{class_name!r}" + ) + seen_classes[class_name] = svc_name + svc_def = self._convert_service(svc_name, svc_data) services.append(svc_def) diff --git a/python-client/river/codegen/templates/service_client.py.j2 b/python-client/river/codegen/templates/service_client.py.j2 index 2cd7ff0a..cb930eab 100644 --- a/python-client/river/codegen/templates/service_client.py.j2 +++ b/python-client/river/codegen/templates/service_client.py.j2 @@ -44,7 +44,7 @@ class {{ service.class_name }}Client: abort_signal: asyncio.Event | None = None, ) -> {{ proc | result_type }}: {% if proc.description %} - """{{ proc.description }}""" + """{{ proc.description | docstring }}""" {% endif %} return await self._client.rpc( "{{ service.name }}", @@ -60,7 +60,7 @@ class {{ service.class_name }}Client: abort_signal: 
asyncio.Event | None = None, ) -> StreamResult[{{ proc.input_type.annotation }}]: {% if proc.description %} - """{{ proc.description }}""" + """{{ proc.description | docstring }}""" {% endif %} return self._client.stream( "{{ service.name }}", @@ -76,7 +76,7 @@ class {{ service.class_name }}Client: abort_signal: asyncio.Event | None = None, ) -> UploadResult[{{ proc.input_type.annotation }}]: {% if proc.description %} - """{{ proc.description }}""" + """{{ proc.description | docstring }}""" {% endif %} return self._client.upload( "{{ service.name }}", @@ -92,7 +92,7 @@ class {{ service.class_name }}Client: abort_signal: asyncio.Event | None = None, ) -> SubscriptionResult: {% if proc.description %} - """{{ proc.description }}""" + """{{ proc.description | docstring }}""" {% endif %} return self._client.subscribe( "{{ service.name }}", diff --git a/python-client/river/codegen/templates/types.py.j2 b/python-client/river/codegen/templates/types.py.j2 index 0614d707..39954ca7 100644 --- a/python-client/river/codegen/templates/types.py.j2 +++ b/python-client/river/codegen/templates/types.py.j2 @@ -12,7 +12,7 @@ from typing_extensions import {{ typing_ext_imports | join(", ") }} class {{ td.name }}(TypedDict): {% if td.description %} - """{{ td.description }}""" + """{{ td.description | docstring }}""" {% endif %} {% if not td.fields %} diff --git a/python-client/river/types.py b/python-client/river/types.py index af193089..31eb9695 100644 --- a/python-client/river/types.py +++ b/python-client/river/types.py @@ -92,36 +92,37 @@ def from_dict(cls, d: dict[str, Any]) -> TransportMessage: Raises ``TypeError`` if required fields have wrong types. 
""" - required_str = {"id": "id", "from": "from", "streamId": "streamId"} + required_str = { + "id": "id", + "from": "from", + "to": "to", + "streamId": "streamId", + } for wire_key, label in required_str.items(): if wire_key not in d: raise KeyError(f"Missing required field: {label}") if not isinstance(d[wire_key], str): raise TypeError( - f"Field '{label}' must be str, " - f"got {type(d[wire_key]).__name__}" + f"Field '{label}' must be str, got {type(d[wire_key]).__name__}" ) required_int = {"seq": "seq", "ack": "ack"} for wire_key, label in required_int.items(): if wire_key not in d: raise KeyError(f"Missing required field: {label}") - if not isinstance(d[wire_key], int): + # bool is a subclass of int in Python — reject it explicitly + if isinstance(d[wire_key], bool) or not isinstance(d[wire_key], int): raise TypeError( - f"Field '{label}' must be int, " - f"got {type(d[wire_key]).__name__}" + f"Field '{label}' must be int, got {type(d[wire_key]).__name__}" ) - if "to" not in d: - raise KeyError("Missing required field: to") if "payload" not in d: raise KeyError("Missing required field: payload") control_flags = d.get("controlFlags", 0) - if not isinstance(control_flags, int): + if isinstance(control_flags, bool) or not isinstance(control_flags, int): raise TypeError( - f"Field 'controlFlags' must be int, " - f"got {type(control_flags).__name__}" + f"Field 'controlFlags' must be int, got {type(control_flags).__name__}" ) return cls( diff --git a/python-client/tests/test_codegen.py b/python-client/tests/test_codegen.py index ac2f9088..45a9ab2e 100644 --- a/python-client/tests/test_codegen.py +++ b/python-client/tests/test_codegen.py @@ -584,6 +584,27 @@ def test_no_collision_passes(self): ir = converter.convert(raw) assert len(ir.services) == 2 + def test_service_class_name_collision_raises(self): + """Two services that map to the same class name are rejected.""" + from river.codegen.schema import SchemaConverter + + raw = { + "services": { + "foo_bar": 
{"procedures": {}}, + "FooBar": {"procedures": {}}, + } + } + converter = SchemaConverter() + with pytest.raises(ValueError, match="FooBarClient"): + converter.convert(raw) + + def test_description_with_triple_quotes(self): + """Descriptions containing triple quotes are escaped in output.""" + from river.codegen.emitter import _escape_docstring + + assert '"""' not in _escape_docstring('bad """ doc') + assert _escape_docstring('say """hello"""') == r"say \"\"\"hello\"\"\"" + # --------------------------------------------------------------------------- # Complex type tests From fa9b4fbce600c26d8730a7f971bd1a23918eb60d Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Tue, 3 Mar 2026 00:44:15 -0800 Subject: [PATCH 24/29] typing --- python-client/river/client.py | 33 +++++++++++-------- .../codegen/templates/service_client.py.j2 | 6 ++-- .../tests/generated/cancel_client.py | 14 ++++---- .../tests/generated/fallible_client.py | 2 +- .../tests/generated/subscribable_client.py | 2 +- python-client/tests/generated/test_client.py | 4 +-- .../tests/generated/uploadable_client.py | 6 ++-- 7 files changed, 37 insertions(+), 30 deletions(-) diff --git a/python-client/river/client.py b/python-client/river/client.py index b7c6f812..a0bc1e27 100644 --- a/python-client/river/client.py +++ b/python-client/river/client.py @@ -8,8 +8,9 @@ import asyncio import logging +from collections.abc import Awaitable, Callable from dataclasses import dataclass -from typing import Any, Callable, Generic, Literal, TypeVar +from typing import Any, Generic, Literal, TypeVar from typing_extensions import TypedDict @@ -33,6 +34,7 @@ logger = logging.getLogger(__name__) T = TypeVar("T") +TOutput = TypeVar("TOutput") TPayload = TypeVar("TPayload") @@ -51,32 +53,37 @@ class ErrResult(TypedDict, Generic[TPayload]): @dataclass -class StreamResult(Generic[T]): +class StreamResult(Generic[T, TOutput]): """Result of opening a stream procedure. - Generic over the input type ``T`` written to ``req_writable``. 
+ Generic over the input type ``T`` written to ``req_writable`` + and the output type ``TOutput`` read from ``res_readable``. """ req_writable: Writable[T] - res_readable: Readable + res_readable: Readable[TOutput] @dataclass -class UploadResult(Generic[T]): +class UploadResult(Generic[T, TOutput]): """Result of opening an upload procedure. - Generic over the input type ``T`` written to ``req_writable``. + Generic over the input type ``T`` written to ``req_writable`` + and the output type ``TOutput`` returned by ``finalize()``. """ req_writable: Writable[T] - finalize: Callable[[], Any] # async callable returning RpcResult + finalize: Callable[[], Awaitable[TOutput]] @dataclass -class SubscriptionResult: - """Result of opening a subscription procedure.""" +class SubscriptionResult(Generic[T]): + """Result of opening a subscription procedure. - res_readable: Readable + Generic over the output type ``T`` received from ``res_readable``. + """ + + res_readable: Readable[T] class RiverClient: @@ -156,7 +163,7 @@ def stream( procedure_name: str, init: Any, abort_signal: asyncio.Event | None = None, - ) -> StreamResult: + ) -> StreamResult[Any, Any]: """Open a stream procedure. Returns StreamResult with req_writable and res_readable. @@ -179,7 +186,7 @@ def upload( procedure_name: str, init: Any, abort_signal: asyncio.Event | None = None, - ) -> UploadResult: + ) -> UploadResult[Any, Any]: """Open an upload procedure. Returns UploadResult with req_writable and finalize(). @@ -213,7 +220,7 @@ def subscribe( procedure_name: str, init: Any, abort_signal: asyncio.Event | None = None, - ) -> SubscriptionResult: + ) -> SubscriptionResult[Any]: """Open a subscription procedure. Returns SubscriptionResult with res_readable. 
diff --git a/python-client/river/codegen/templates/service_client.py.j2 b/python-client/river/codegen/templates/service_client.py.j2 index cb930eab..89959804 100644 --- a/python-client/river/codegen/templates/service_client.py.j2 +++ b/python-client/river/codegen/templates/service_client.py.j2 @@ -58,7 +58,7 @@ class {{ service.class_name }}Client: init: {{ proc.init_type.annotation }}, *, abort_signal: asyncio.Event | None = None, - ) -> StreamResult[{{ proc.input_type.annotation }}]: + ) -> StreamResult[{{ proc.input_type.annotation }}, {{ proc | result_type }}]: {% if proc.description %} """{{ proc.description | docstring }}""" {% endif %} @@ -74,7 +74,7 @@ class {{ service.class_name }}Client: init: {{ proc.init_type.annotation }}, *, abort_signal: asyncio.Event | None = None, - ) -> UploadResult[{{ proc.input_type.annotation }}]: + ) -> UploadResult[{{ proc.input_type.annotation }}, {{ proc | result_type }}]: {% if proc.description %} """{{ proc.description | docstring }}""" {% endif %} @@ -90,7 +90,7 @@ class {{ service.class_name }}Client: init: {{ proc.init_type.annotation }}, *, abort_signal: asyncio.Event | None = None, - ) -> SubscriptionResult: + ) -> SubscriptionResult[{{ proc | result_type }}]: {% if proc.description %} """{{ proc.description | docstring }}""" {% endif %} diff --git a/python-client/tests/generated/cancel_client.py b/python-client/tests/generated/cancel_client.py index 4388a876..6ba531da 100644 --- a/python-client/tests/generated/cancel_client.py +++ b/python-client/tests/generated/cancel_client.py @@ -67,7 +67,7 @@ def blocking_stream( init: CancelBlockingStreamInit, *, abort_signal: asyncio.Event | None = None, - ) -> StreamResult[CancelBlockingStreamInput]: + ) -> StreamResult[CancelBlockingStreamInput, OkResult[CancelBlockingStreamOutput] | ErrResult[ProtocolError]]: return self._client.stream( "cancel", "blockingStream", @@ -80,7 +80,7 @@ def blocking_upload( init: CancelBlockingUploadInit, *, abort_signal: asyncio.Event | None = 
None, - ) -> UploadResult[CancelBlockingUploadInput]: + ) -> UploadResult[CancelBlockingUploadInput, OkResult[CancelBlockingUploadOutput] | ErrResult[ProtocolError]]: return self._client.upload( "cancel", "blockingUpload", @@ -93,7 +93,7 @@ def blocking_subscription( init: CancelBlockingSubscriptionInit, *, abort_signal: asyncio.Event | None = None, - ) -> SubscriptionResult: + ) -> SubscriptionResult[OkResult[CancelBlockingSubscriptionOutput] | ErrResult[ProtocolError]]: return self._client.subscribe( "cancel", "blockingSubscription", @@ -119,7 +119,7 @@ def immediate_stream( init: CancelImmediateStreamInit, *, abort_signal: asyncio.Event | None = None, - ) -> StreamResult[CancelImmediateStreamInput]: + ) -> StreamResult[CancelImmediateStreamInput, OkResult[CancelImmediateStreamOutput] | ErrResult[ProtocolError]]: return self._client.stream( "cancel", "immediateStream", @@ -132,7 +132,7 @@ def immediate_upload( init: CancelImmediateUploadInit, *, abort_signal: asyncio.Event | None = None, - ) -> UploadResult[CancelImmediateUploadInput]: + ) -> UploadResult[CancelImmediateUploadInput, OkResult[CancelImmediateUploadOutput] | ErrResult[ProtocolError]]: return self._client.upload( "cancel", "immediateUpload", @@ -145,7 +145,7 @@ def immediate_subscription( init: CancelImmediateSubscriptionInit, *, abort_signal: asyncio.Event | None = None, - ) -> SubscriptionResult: + ) -> SubscriptionResult[OkResult[CancelImmediateSubscriptionOutput] | ErrResult[ProtocolError]]: return self._client.subscribe( "cancel", "immediateSubscription", @@ -158,7 +158,7 @@ def counted_stream( init: CancelCountedStreamInit, *, abort_signal: asyncio.Event | None = None, - ) -> StreamResult[CancelCountedStreamInput]: + ) -> StreamResult[CancelCountedStreamInput, OkResult[CancelCountedStreamOutput] | ErrResult[ProtocolError]]: return self._client.stream( "cancel", "countedStream", diff --git a/python-client/tests/generated/fallible_client.py b/python-client/tests/generated/fallible_client.py index 
8c799556..72b1f465 100644 --- a/python-client/tests/generated/fallible_client.py +++ b/python-client/tests/generated/fallible_client.py @@ -52,7 +52,7 @@ def echo( init: FallibleEchoInit, *, abort_signal: asyncio.Event | None = None, - ) -> StreamResult[FallibleEchoInput]: + ) -> StreamResult[FallibleEchoInput, OkResult[FallibleEchoOutput] | ErrResult[FallibleEchoError | ProtocolError]]: return self._client.stream( "fallible", "echo", diff --git a/python-client/tests/generated/subscribable_client.py b/python-client/tests/generated/subscribable_client.py index 9c1649d3..dcc2207d 100644 --- a/python-client/tests/generated/subscribable_client.py +++ b/python-client/tests/generated/subscribable_client.py @@ -48,7 +48,7 @@ def value( init: SubscribableValueInit, *, abort_signal: asyncio.Event | None = None, - ) -> SubscriptionResult: + ) -> SubscriptionResult[OkResult[SubscribableValueOutput] | ErrResult[ProtocolError]]: return self._client.subscribe( "subscribable", "value", diff --git a/python-client/tests/generated/test_client.py b/python-client/tests/generated/test_client.py index c3def4ed..94099461 100644 --- a/python-client/tests/generated/test_client.py +++ b/python-client/tests/generated/test_client.py @@ -56,7 +56,7 @@ def echo( init: TestEchoInit, *, abort_signal: asyncio.Event | None = None, - ) -> StreamResult[TestEchoInput]: + ) -> StreamResult[TestEchoInput, OkResult[TestEchoOutput] | ErrResult[ProtocolError]]: return self._client.stream( "test", "echo", @@ -69,7 +69,7 @@ def echo_with_prefix( init: TestEchoWithPrefixInit, *, abort_signal: asyncio.Event | None = None, - ) -> StreamResult[TestEchoWithPrefixInput]: + ) -> StreamResult[TestEchoWithPrefixInput, OkResult[TestEchoWithPrefixOutput] | ErrResult[ProtocolError]]: return self._client.stream( "test", "echoWithPrefix", diff --git a/python-client/tests/generated/uploadable_client.py b/python-client/tests/generated/uploadable_client.py index a14c45e6..89ea12b5 100644 --- 
a/python-client/tests/generated/uploadable_client.py +++ b/python-client/tests/generated/uploadable_client.py @@ -40,7 +40,7 @@ def add_multiple( init: UploadableAddMultipleInit, *, abort_signal: asyncio.Event | None = None, - ) -> UploadResult[UploadableAddMultipleInput]: + ) -> UploadResult[UploadableAddMultipleInput, OkResult[UploadableAddMultipleOutput] | ErrResult[ProtocolError]]: return self._client.upload( "uploadable", "addMultiple", @@ -53,7 +53,7 @@ def add_multiple_with_prefix( init: UploadableAddMultipleWithPrefixInit, *, abort_signal: asyncio.Event | None = None, - ) -> UploadResult[UploadableAddMultipleWithPrefixInput]: + ) -> UploadResult[UploadableAddMultipleWithPrefixInput, OkResult[UploadableAddMultipleWithPrefixOutput] | ErrResult[ProtocolError]]: return self._client.upload( "uploadable", "addMultipleWithPrefix", @@ -66,7 +66,7 @@ def cancellable_add( init: UploadableCancellableAddInit, *, abort_signal: asyncio.Event | None = None, - ) -> UploadResult[UploadableCancellableAddInput]: + ) -> UploadResult[UploadableCancellableAddInput, OkResult[UploadableCancellableAddOutput] | ErrResult[ProtocolError]]: return self._client.upload( "uploadable", "cancellableAdd", From 149d68e9eb3b90fc16ab48fccea48a24a3a0428e Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Tue, 3 Mar 2026 01:06:25 -0800 Subject: [PATCH 25/29] fix --- python-client/river/codegen/schema.py | 19 +++++++++++++-- python-client/river/types.py | 4 +++- python-client/tests/test_codegen.py | 33 +++++++++++++++++++++++++-- 3 files changed, 51 insertions(+), 5 deletions(-) diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index b2613578..dcae9a5b 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -157,11 +157,14 @@ def __init__(self) -> None: self._typedicts: list[TypedDictDef] = [] # $id → assigned Python name (for recursive $ref resolution) self._id_to_name: dict[str, str] = {} + # Track emitted TypedDict 
names to detect collisions + self._td_names: set[str] = set() def convert(self, raw: dict) -> SchemaIR: """Convert the top-level serialized schema dict to IR.""" self._typedicts = [] self._id_to_name = {} + self._td_names = set() services: list[ServiceDef] = [] seen_modules: dict[str, str] = {} # sanitized name → wire name seen_classes: dict[str, str] = {} # class name → wire name @@ -345,6 +348,16 @@ def _schema_to_typeref(self, schema: dict, name_hint: str) -> TypeRef: # Fallback return TypeRef(annotation="Any") + def _emit_typedict(self, td: TypedDictDef) -> None: + """Register a TypedDict, raising on name collision.""" + if td.name in self._td_names: + raise ValueError( + f"TypedDict name {td.name!r} is already used; " + f"two schema properties map to the same generated class" + ) + self._td_names.add(td.name) + self._typedicts.append(td) + def _convert_object(self, schema: dict, name: str) -> TypeRef: """Convert a JSON Schema object to a TypedDict and return a ref to it.""" properties = schema.get("properties", {}) @@ -371,7 +384,7 @@ def _convert_object(self, schema: dict, name: str) -> TypeRef: ) td = TypedDictDef(name=name, fields=fields, description=description) - self._typedicts.append(td) + self._emit_typedict(td) return TypeRef(annotation=name) def _convert_intersection(self, schema: dict, name_hint: str) -> TypeRef: @@ -429,7 +442,7 @@ def _convert_intersection(self, schema: dict, name_hint: str) -> TypeRef: ) ) td = TypedDictDef(name=name_hint, fields=fields, description=description) - self._typedicts.append(td) + self._emit_typedict(td) return TypeRef(annotation=name_hint) # No object variants — primitive intersection is unrepresentable @@ -441,6 +454,8 @@ def _convert_intersection(self, schema: dict, name_hint: str) -> TypeRef: def _convert_union(self, schema: dict, name_hint: str) -> TypeRef: """Convert a JSON Schema anyOf to a Union type.""" variants = schema.get("anyOf", []) + if len(variants) == 0: + return TypeRef(annotation="Never") if 
len(variants) == 1: return self._schema_to_typeref(variants[0], name_hint) diff --git a/python-client/river/types.py b/python-client/river/types.py index 31eb9695..5ebb3fbe 100644 --- a/python-client/river/types.py +++ b/python-client/river/types.py @@ -119,7 +119,9 @@ def from_dict(cls, d: dict[str, Any]) -> TransportMessage: if "payload" not in d: raise KeyError("Missing required field: payload") - control_flags = d.get("controlFlags", 0) + if "controlFlags" not in d: + raise KeyError("Missing required field: controlFlags") + control_flags = d["controlFlags"] if isinstance(control_flags, bool) or not isinstance(control_flags, int): raise TypeError( f"Field 'controlFlags' must be int, got {type(control_flags).__name__}" diff --git a/python-client/tests/test_codegen.py b/python-client/tests/test_codegen.py index 45a9ab2e..9d9f4a98 100644 --- a/python-client/tests/test_codegen.py +++ b/python-client/tests/test_codegen.py @@ -512,7 +512,7 @@ class TestNameCollisions: """Codegen detects and rejects name collisions.""" def test_procedure_name_collision_raises(self): - """Two procedures that map to the same snake_case name are rejected.""" + """Two procedures that collide (method name or TypedDict) are rejected.""" from river.codegen.schema import SchemaConverter raw = { @@ -534,7 +534,7 @@ def test_procedure_name_collision_raises(self): } } converter = SchemaConverter() - with pytest.raises(ValueError, match="foo_bar"): + with pytest.raises(ValueError): converter.convert(raw) def test_service_module_collision_raises(self): @@ -605,6 +605,35 @@ def test_description_with_triple_quotes(self): assert '"""' not in _escape_docstring('bad """ doc') assert _escape_docstring('say """hello"""') == r"say \"\"\"hello\"\"\"" + def test_typedict_name_collision_raises(self): + """Two properties that generate the same TypedDict name are rejected.""" + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + schema = { + "type": "object", + "properties": { + 
"fooBar": { + "type": "object", + "properties": {"a": {"type": "string"}}, + }, + "FooBar": { + "type": "object", + "properties": {"b": {"type": "number"}}, + }, + }, + } + with pytest.raises(ValueError, match="already used"): + converter._schema_to_typeref(schema, "Prefix") + + def test_empty_anyof_is_never(self): + """anyOf with zero variants → Never.""" + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + ref = converter._schema_to_typeref({"anyOf": []}, "X") + assert ref.annotation == "Never" + # --------------------------------------------------------------------------- # Complex type tests From 98d408785736748631b24e083bdc23e8ea0ad86c Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Tue, 3 Mar 2026 13:19:02 -0800 Subject: [PATCH 26/29] more --- python-client/river/__init__.py | 2 + python-client/river/client.py | 26 ++++ python-client/river/codec.py | 10 +- python-client/river/codegen/__main__.py | 9 +- python-client/river/codegen/emitter.py | 65 +++++++- python-client/river/codegen/schema.py | 112 ++++++++------ .../river/codegen/templates/root_client.py.j2 | 18 +++ .../river/codegen/templates/types.py.j2 | 5 + python-client/river/transport.py | 9 ++ python-client/tests/generated_v2/__init__.py | 11 ++ python-client/tests/generated_v2/_errors.py | 45 ++++++ .../tests/generated_v2/_root_client.py | 14 ++ python-client/tests/generated_v2/_types.py | 17 +++ .../tests/generated_v2/test_client.py | 42 ++++++ python-client/tests/test_codegen.py | 88 ++++++----- python-client/tests/test_e2e.py | 96 ++++++++++++ .../tests/test_schema_handshake.json | 142 ++++++++++++++++++ 17 files changed, 623 insertions(+), 88 deletions(-) create mode 100644 python-client/river/codegen/templates/root_client.py.j2 create mode 100644 python-client/tests/generated_v2/__init__.py create mode 100644 python-client/tests/generated_v2/_errors.py create mode 100644 python-client/tests/generated_v2/_root_client.py create mode 100644 
python-client/tests/generated_v2/_types.py create mode 100644 python-client/tests/generated_v2/test_client.py create mode 100644 python-client/tests/test_schema_handshake.json diff --git a/python-client/river/__init__.py b/python-client/river/__init__.py index f739d745..34864460 100644 --- a/python-client/river/__init__.py +++ b/python-client/river/__init__.py @@ -12,6 +12,7 @@ UploadResult, ) from river.codec import BinaryCodec +from river.session import SessionOptions from river.streams import Readable, Writable from river.transport import WebSocketClientTransport from river.types import Err, Ok, TransportMessage @@ -25,6 +26,7 @@ "SubscriptionResult", "WebSocketClientTransport", "BinaryCodec", + "SessionOptions", "TransportMessage", "Ok", "Err", diff --git a/python-client/river/client.py b/python-client/river/client.py index a0bc1e27..39dce15a 100644 --- a/python-client/river/client.py +++ b/python-client/river/client.py @@ -128,6 +128,32 @@ def __init__( if eagerly_connect: transport.connect(self._server_id) + @classmethod + async def connect( + cls, + url: str, + *, + client_id: str | None = None, + server_id: str = "SERVER", + handshake_metadata: Any = None, + options: "SessionOptions | None" = None, + ) -> "RiverClient": + """Create a connected RiverClient. + + Convenience factory that creates a transport and eagerly connects. 
+ """ + from river.session import SessionOptions as _SO + + transport = WebSocketClientTransport( + url, + client_id=client_id, + server_id=server_id, + handshake_metadata=handshake_metadata, + options=options or _SO(), + ) + client = cls(transport, server_id=server_id, eagerly_connect=True) + return client + @property def transport(self) -> WebSocketClientTransport: return self._transport diff --git a/python-client/river/codec.py b/python-client/river/codec.py index 579b518d..62d33738 100644 --- a/python-client/river/codec.py +++ b/python-client/river/codec.py @@ -23,8 +23,10 @@ def from_buffer(self, buf: bytes) -> dict[str, Any]: _BIGINT_EXT_TYPE = 0 -_MSGPACK_INT_MAX = 2**64 - 1 -_MSGPACK_INT_MIN = -(2**63) +# Use JS Number.MAX_SAFE_INTEGER bounds, not msgpack's 64-bit range. +# Values outside this range lose precision when decoded as JS numbers. +_MAX_SAFE_INTEGER = 2**53 - 1 +_MIN_SAFE_INTEGER = -(2**53 - 1) class BinaryCodec(Codec): @@ -46,7 +48,9 @@ def from_buffer(self, buf: bytes) -> dict[str, Any]: def _ext_encode(obj: Any) -> Any: import msgpack - if isinstance(obj, int) and (obj > _MSGPACK_INT_MAX or obj < _MSGPACK_INT_MIN): + if isinstance(obj, int) and ( + obj > _MAX_SAFE_INTEGER or obj < _MIN_SAFE_INTEGER + ): # Encode as string in extension type 0 (matches TS BigInt ext) data = msgpack.packb(str(obj), use_bin_type=True) return msgpack.ExtType(_BIGINT_EXT_TYPE, data) diff --git a/python-client/river/codegen/__main__.py b/python-client/river/codegen/__main__.py index 09e88a2e..9c46594b 100644 --- a/python-client/river/codegen/__main__.py +++ b/python-client/river/codegen/__main__.py @@ -35,6 +35,11 @@ def main(argv: list[str] | None = None) -> None: default=None, help="Absolute import prefix instead of relative imports.", ) + parser.add_argument( + "--client-name", + default=None, + help="Generate a root client class with this name that aggregates all services.", + ) args = parser.parse_args(argv) @@ -44,7 +49,9 @@ def main(argv: list[str] | None = 
None) -> None: converter = SchemaConverter() ir = converter.convert(raw_schema) - written = write_generated_files(ir, args.output, package=args.package) + written = write_generated_files( + ir, args.output, package=args.package, client_name=args.client_name + ) for path in written: print(f" wrote {path}") diff --git a/python-client/river/codegen/emitter.py b/python-client/river/codegen/emitter.py index 94ca0c93..0896fd16 100644 --- a/python-client/river/codegen/emitter.py +++ b/python-client/river/codegen/emitter.py @@ -98,7 +98,11 @@ def _prepare_typedicts(ir: SchemaIR) -> list[dict]: for td in ir.typedicts: fields = [] for f in td.fields: - fields.append({"name": f.name, "annotation": _field_annotation(f)}) + fields.append({ + "name": f.name, + "annotation": _field_annotation(f), + "description": f.description, + }) result.append( {"name": td.name, "description": td.description, "fields": fields} ) @@ -112,6 +116,19 @@ def render_errors() -> str: def render_types(ir: SchemaIR) -> str: typedicts = _prepare_typedicts(ir) + # Append handshake TypedDict if present + if ir.handshake_type: + hs_fields = [] + for f in ir.handshake_type.fields: + hs_fields.append({"name": f.name, "annotation": _field_annotation(f)}) + typedicts.append( + { + "name": ir.handshake_type.name, + "description": ir.handshake_type.description, + "fields": hs_fields, + } + ) + needs_literal = any( "Literal[" in f["annotation"] for td in typedicts for f in td["fields"] ) @@ -168,7 +185,34 @@ def _module_name(service_name: str) -> str: return _sanitize_identifier(service_name) -def render_init(ir: SchemaIR, import_prefix: str) -> str: +def render_root_client( + ir: SchemaIR, client_name: str, import_prefix: str +) -> str: + imports = [] + services = [] + for svc in ir.services: + mod_name = _module_name(svc.name) + cls = f"{svc.class_name}Client" + if import_prefix == ".": + mod = f".{mod_name}_client" + else: + mod = f"{import_prefix}{mod_name}_client" + imports.append((mod, cls)) + 
services.append((_sanitize_identifier(svc.name), cls)) + + imports.sort(key=lambda x: x[0]) + services.sort(key=lambda x: x[0]) + + return _env.get_template("root_client.py.j2").render( + client_name=client_name, + imports=imports, + services=services, + ) + + +def render_init( + ir: SchemaIR, import_prefix: str, client_name: str | None = None +) -> str: imports = [] for svc in ir.services: mod_name = _module_name(svc.name) @@ -178,6 +222,17 @@ def render_init(ir: SchemaIR, import_prefix: str) -> str: mod = f"{import_prefix}{mod_name}_client" imports.append((mod, f"{svc.class_name}Client")) + if client_name: + if import_prefix == ".": + mod = "._root_client" + else: + mod = f"{import_prefix}_root_client" + imports.append((mod, client_name)) + + if ir.handshake_type: + types_mod = "._types" if import_prefix == "." else f"{import_prefix}_types" + imports.append((types_mod, ir.handshake_type.name)) + imports.sort(key=lambda x: x[0]) return _env.get_template("init.py.j2").render(imports=imports) @@ -192,6 +247,7 @@ def write_generated_files( ir: SchemaIR, output_dir: str, package: str | None = None, + client_name: str | None = None, ) -> list[str]: """Write all generated files to *output_dir*. 
@@ -215,6 +271,9 @@ def _write(name: str, content: str) -> None: render_service_client(svc, ir, import_prefix), ) - _write("__init__.py", render_init(ir, import_prefix)) + if client_name: + _write("_root_client.py", render_root_client(ir, client_name, import_prefix)) + + _write("__init__.py", render_init(ir, import_prefix, client_name=client_name)) return written diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index dcae9a5b..13611233 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -69,6 +69,7 @@ class SchemaIR: services: list[ServiceDef] = field(default_factory=list) typedicts: list[TypedDictDef] = field(default_factory=list) + handshake_type: TypedDictDef | None = None # --------------------------------------------------------------------------- @@ -118,31 +119,19 @@ def _to_snake_case(s: str) -> str: def _safe_field_name(name: str) -> str: - """Ensure a field name is a valid Python identifier. + """Normalize a property name into a valid Python identifier. - Raises ValueError if the name requires sanitization that would - change it from its wire representation, since TypedDict keys must - match the dict keys sent on the wire. + Strips characters illegal in identifiers (e.g. ``$kind`` → ``kind``) + and appends ``_`` to Python keywords. """ sanitized = _sanitize_identifier(name) - if sanitized != name: - raise ValueError( - f"schema property {name!r} is not a valid Python identifier " - f"and cannot be represented in a TypedDict" - ) - if keyword.iskeyword(name): - raise ValueError( - f"schema property {name!r} is a Python keyword " - f"and cannot be used as a TypedDict field" - ) + if keyword.iskeyword(sanitized): + sanitized += "_" # Names starting with __ (and not ending with __) are name-mangled - # inside class bodies, so the TypedDict key won't match the wire key. 
- if name.startswith("__") and not name.endswith("__"): - raise ValueError( - f"schema property {name!r} would be name-mangled in a " - f"TypedDict class body and cannot be used as a field" - ) - return name + # inside class bodies — prefix with underscore to avoid that. + if sanitized.startswith("__") and not sanitized.endswith("__"): + sanitized = "_" + sanitized + return sanitized # --------------------------------------------------------------------------- @@ -190,7 +179,19 @@ def convert(self, raw: dict) -> SchemaIR: svc_def = self._convert_service(svc_name, svc_data) services.append(svc_def) - return SchemaIR(services=services, typedicts=list(self._typedicts)) + # Parse optional handshake schema + handshake_type: TypedDictDef | None = None + hs_schema = raw.get("handshakeSchema") + if hs_schema and isinstance(hs_schema, dict): + self._schema_to_typeref(hs_schema, "HandshakeSchema") + # The TypedDict was just emitted — pop it off _typedicts + handshake_type = self._typedicts.pop() + + return SchemaIR( + services=services, + typedicts=list(self._typedicts), + handshake_type=handshake_type, + ) def _convert_service(self, name: str, data: dict) -> ServiceDef: class_name = _to_pascal_case(name) @@ -217,13 +218,17 @@ def _convert_procedure(self, svc_class: str, name: str, data: dict) -> Procedure proc_type = data["type"] prefix = svc_class + _to_pascal_case(name) - # Init type - init_type = self._schema_to_typeref(data["init"], f"{prefix}Init") - - # Input type (only for stream/upload) + # Init type and streaming input type. 
+ # Two schema formats: + # - v2 (serializeSchema): all procedures have "init"; stream/upload also have "input" + # - v1 (pid2 etc.): rpc/subscription use "input" as init; stream/upload have "init" + "input" input_type = None - if "input" in data: - input_type = self._schema_to_typeref(data["input"], f"{prefix}Input") + if "init" in data: + init_type = self._schema_to_typeref(data["init"], f"{prefix}Init") + if "input" in data: + input_type = self._schema_to_typeref(data["input"], f"{prefix}Input") + else: + init_type = self._schema_to_typeref(data["input"], f"{prefix}Init") # Output type output_type = self._schema_to_typeref(data["output"], f"{prefix}Output") @@ -349,12 +354,9 @@ def _schema_to_typeref(self, schema: dict, name_hint: str) -> TypeRef: return TypeRef(annotation="Any") def _emit_typedict(self, td: TypedDictDef) -> None: - """Register a TypedDict, raising on name collision.""" + """Register a TypedDict, skipping if the same name was already emitted.""" if td.name in self._td_names: - raise ValueError( - f"TypedDict name {td.name!r} is already used; " - f"two schema properties map to the same generated class" - ) + return self._td_names.add(td.name) self._typedicts.append(td) @@ -365,8 +367,16 @@ def _convert_object(self, schema: dict, name: str) -> TypeRef: description = schema.get("description") fields: list[TypedDictField] = [] + seen_field_names: dict[str, str] = {} # normalized → original for prop_name, prop_schema in properties.items(): field_name = _safe_field_name(prop_name) + if field_name in seen_field_names: + raise ValueError( + f"TypedDict {name!r}: properties " + f"{seen_field_names[field_name]!r} and {prop_name!r} " + f"both normalize to field {field_name!r}" + ) + seen_field_names[field_name] = prop_name nested_name = name + _to_pascal_case(prop_name) field_ref = self._schema_to_typeref(prop_schema, nested_name) field_desc = ( @@ -412,20 +422,31 @@ def _convert_intersection(self, schema: dict, name_hint: str) -> TypeRef: else: 
other_variants.append(v) - # Merge all object properties - merged_props: dict[str, dict] = {} - merged_required: set[str] = set() - for v in object_variants: - for prop_name, prop_schema in v.get("properties", {}).items(): - merged_props[prop_name] = prop_schema - merged_required.update(v.get("required", [])) + # Mixed object + non-object is contradictory (object ∩ number = ∅) + if object_variants and other_variants: + return TypeRef(annotation="Never") + + # Pure object intersection — merge properties + if object_variants: + merged_props: dict[str, dict] = {} + merged_required: set[str] = set() + for v in object_variants: + for prop_name, prop_schema in v.get("properties", {}).items(): + merged_props[prop_name] = prop_schema + merged_required.update(v.get("required", [])) - # If we have object properties, emit a TypedDict - if merged_props or object_variants: description = schema.get("description") fields: list[TypedDictField] = [] + seen_field_names: dict[str, str] = {} for prop_name, prop_schema in merged_props.items(): field_name = _safe_field_name(prop_name) + if field_name in seen_field_names: + raise ValueError( + f"TypedDict {name_hint!r}: properties " + f"{seen_field_names[field_name]!r} and {prop_name!r} " + f"both normalize to field {field_name!r}" + ) + seen_field_names[field_name] = prop_name nested_name = name_hint + _to_pascal_case(prop_name) field_ref = self._schema_to_typeref(prop_schema, nested_name) field_desc = ( @@ -445,11 +466,8 @@ def _convert_intersection(self, schema: dict, name_hint: str) -> TypeRef: self._emit_typedict(td) return TypeRef(annotation=name_hint) - # No object variants — primitive intersection is unrepresentable - if other_variants: - return TypeRef(annotation="Never") - - return TypeRef(annotation="Any") + # Only non-object variants — contradictory primitive intersection + return TypeRef(annotation="Never") def _convert_union(self, schema: dict, name_hint: str) -> TypeRef: """Convert a JSON Schema anyOf to a Union type.""" 
diff --git a/python-client/river/codegen/templates/root_client.py.j2 b/python-client/river/codegen/templates/root_client.py.j2 new file mode 100644 index 00000000..6833d7c4 --- /dev/null +++ b/python-client/river/codegen/templates/root_client.py.j2 @@ -0,0 +1,18 @@ +"""Generated root client aggregating all service clients.""" + +from __future__ import annotations + +from river.client import RiverClient +{% for mod, cls in imports %} +from {{ mod }} import {{ cls }} +{% endfor %} + + +class {{ client_name }}: + """Aggregated client for all services.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client +{% for attr_name, cls in services %} + self.{{ attr_name }} = {{ cls }}(client) +{% endfor %} diff --git a/python-client/river/codegen/templates/types.py.j2 b/python-client/river/codegen/templates/types.py.j2 index 39954ca7..0cc2eab9 100644 --- a/python-client/river/codegen/templates/types.py.j2 +++ b/python-client/river/codegen/templates/types.py.j2 @@ -19,7 +19,12 @@ class {{ td.name }}(TypedDict): pass {% else %} {% for f in td.fields %} +{% if f.description %} {{ f.name }}: {{ f.annotation }} + """{{ f.description | docstring }}""" +{% else %} + {{ f.name }}: {{ f.annotation }} +{% endif %} {% endfor %} {% endif %} {% endfor %} diff --git a/python-client/river/transport.py b/python-client/river/transport.py index 6ac8588e..ad202fb8 100644 --- a/python-client/river/transport.py +++ b/python-client/river/transport.py @@ -346,6 +346,15 @@ async def _do_handshake(self, session: Session, ws: Any, to: str) -> None: return status = payload.get("status", {}) + if not isinstance(status, dict): + logger.error( + "Invalid handshake status: expected dict, got %s", + type(status).__name__, + ) + await ws.close() + self._delete_session(to) + return + if not status.get("ok"): code = status.get("code", "UNKNOWN") reason = status.get("reason", "Unknown reason") diff --git a/python-client/tests/generated_v2/__init__.py 
b/python-client/tests/generated_v2/__init__.py new file mode 100644 index 00000000..3bd51d7a --- /dev/null +++ b/python-client/tests/generated_v2/__init__.py @@ -0,0 +1,11 @@ +"""Generated River service clients.""" + +from ._root_client import TestServer +from ._types import HandshakeSchema +from .test_client import TestClient + +__all__ = [ + "TestServer", + "HandshakeSchema", + "TestClient", +] diff --git a/python-client/tests/generated_v2/_errors.py b/python-client/tests/generated_v2/_errors.py new file mode 100644 index 00000000..ba3e8c37 --- /dev/null +++ b/python-client/tests/generated_v2/_errors.py @@ -0,0 +1,45 @@ +"""Protocol-level error types for the River protocol. + +These errors can be returned by any procedure regardless of its +service-specific error schema. +""" + +from __future__ import annotations + +from typing import Literal + +from typing_extensions import NotRequired, TypedDict + + +class UncaughtError(TypedDict): + code: Literal["UNCAUGHT_ERROR"] + message: str + + +class UnexpectedDisconnect(TypedDict): + code: Literal["UNEXPECTED_DISCONNECT"] + message: str + + +class InvalidRequestExtrasItem(TypedDict): + path: str + message: str + + +class InvalidRequestExtras(TypedDict): + firstValidationErrors: list[InvalidRequestExtrasItem] + totalErrors: float + + +class InvalidRequest(TypedDict): + code: Literal["INVALID_REQUEST"] + message: str + extras: NotRequired[InvalidRequestExtras] + + +class Cancel(TypedDict): + code: Literal["CANCEL"] + message: str + + +ProtocolError = UncaughtError | UnexpectedDisconnect | InvalidRequest | Cancel diff --git a/python-client/tests/generated_v2/_root_client.py b/python-client/tests/generated_v2/_root_client.py new file mode 100644 index 00000000..6ad791f4 --- /dev/null +++ b/python-client/tests/generated_v2/_root_client.py @@ -0,0 +1,14 @@ +"""Generated root client aggregating all service clients.""" + +from __future__ import annotations + +from river.client import RiverClient +from .test_client import 
TestClient + + +class TestServer: + """Aggregated client for all services.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client + self.test = TestClient(client) diff --git a/python-client/tests/generated_v2/_types.py b/python-client/tests/generated_v2/_types.py new file mode 100644 index 00000000..acd1ebea --- /dev/null +++ b/python-client/tests/generated_v2/_types.py @@ -0,0 +1,17 @@ +"""Generated type definitions for River services.""" + +from __future__ import annotations + +from typing_extensions import TypedDict + + +class TestEchoInit(TypedDict): + msg: str + + +class TestEchoOutput(TypedDict): + response: str + + +class HandshakeSchema(TypedDict): + token: str diff --git a/python-client/tests/generated_v2/test_client.py b/python-client/tests/generated_v2/test_client.py new file mode 100644 index 00000000..68be4f38 --- /dev/null +++ b/python-client/tests/generated_v2/test_client.py @@ -0,0 +1,42 @@ +"""Generated client for the test service.""" + +from __future__ import annotations + +import asyncio +from typing import Any + +from river.client import ( + ErrResult, + OkResult, + RiverClient, + StreamResult, + SubscriptionResult, + UploadResult, +) + +from ._types import ( + TestEchoInit, + TestEchoOutput, +) + +from ._errors import ProtocolError + + +class TestClient: + """Typed client for the ``test`` service.""" + + def __init__(self, client: RiverClient) -> None: + self._client = client + + async def echo( + self, + init: TestEchoInit, + *, + abort_signal: asyncio.Event | None = None, + ) -> OkResult[TestEchoOutput] | ErrResult[ProtocolError]: + return await self._client.rpc( + "test", + "echo", + init, + abort_signal=abort_signal, + ) diff --git a/python-client/tests/test_codegen.py b/python-client/tests/test_codegen.py index 9d9f4a98..2ba8870d 100644 --- a/python-client/tests/test_codegen.py +++ b/python-client/tests/test_codegen.py @@ -387,16 +387,13 @@ async def test_fallible_rpc_error(self, server_url: str) -> None: class 
TestCodegenFieldNames: """Codegen field name validation tests.""" - def test_keyword_field_raises(self): - """Python keywords are rejected at codegen time.""" + def test_keyword_field_normalized(self): + """Python keywords get an underscore suffix.""" from river.codegen.schema import _safe_field_name - with pytest.raises(ValueError, match="Python keyword"): - _safe_field_name("from") - with pytest.raises(ValueError, match="Python keyword"): - _safe_field_name("class") - with pytest.raises(ValueError, match="Python keyword"): - _safe_field_name("import") + assert _safe_field_name("from") == "from_" + assert _safe_field_name("class") == "class_" + assert _safe_field_name("import") == "import_" def test_normal_field_unchanged(self): from river.codegen.schema import _safe_field_name @@ -415,19 +412,12 @@ def test_underscore_prefixed_field_accepted(self): assert _safe_field_name("_id") == "_id" assert _safe_field_name("_private") == "_private" - def test_dunder_field_rejected(self): - """Double-underscore-prefixed fields are name-mangled in class bodies. - - Regression: after allowing leading underscores, __dunder fields - were accepted but would be name-mangled in the generated TypedDict - class body, making the key not match the wire representation. 
- """ + def test_dunder_field_normalized(self): + """Double-underscore-prefixed fields get an extra underscore prefix.""" from river.codegen.schema import _safe_field_name - with pytest.raises(ValueError, match="name-mangled"): - _safe_field_name("__dunder") - with pytest.raises(ValueError, match="name-mangled"): - _safe_field_name("__private") + assert _safe_field_name("__dunder") == "___dunder" + assert _safe_field_name("__private") == "___private" # Dunder methods (ending with __) are NOT mangled assert _safe_field_name("__init__") == "__init__" @@ -451,15 +441,20 @@ def test_schema_with_underscore_prefixed_field(self): assert "_id" in field_names assert "name" in field_names - def test_dash_field_raises(self): - """Fields with dashes are rejected at codegen time.""" + def test_dash_field_normalized(self): + """Fields with dashes are normalized.""" + from river.codegen.schema import _safe_field_name + + assert _safe_field_name("request-id") == "request_id" + + def test_dollar_field_normalized(self): + """Fields with dollar signs are normalized.""" from river.codegen.schema import _safe_field_name - with pytest.raises(ValueError, match="not a valid Python identifier"): - _safe_field_name("request-id") + assert _safe_field_name("$kind") == "_kind" - def test_schema_with_invalid_field_raises(self): - """Codegen rejects schemas with non-identifier property names.""" + def test_schema_with_invalid_field_normalized(self): + """Codegen normalizes non-identifier property names.""" from river.codegen.schema import SchemaConverter converter = SchemaConverter() @@ -471,11 +466,15 @@ def test_schema_with_invalid_field_raises(self): }, "required": ["request-id", "normal"], } - with pytest.raises(ValueError, match="not a valid Python identifier"): - converter._schema_to_typeref(schema, "TestObj") + ref = converter._schema_to_typeref(schema, "TestObj") + assert ref.annotation == "TestObj" + td = converter._typedicts[-1] + field_names = [f.name for f in td.fields] + assert 
"request_id" in field_names + assert "normal" in field_names - def test_schema_with_keyword_field_raises(self): - """Codegen rejects schemas with keyword property names.""" + def test_schema_with_keyword_field_normalized(self): + """Codegen normalizes keyword property names.""" from river.codegen.schema import SchemaConverter converter = SchemaConverter() @@ -486,7 +485,25 @@ def test_schema_with_keyword_field_raises(self): }, "required": ["from"], } - with pytest.raises(ValueError, match="Python keyword"): + ref = converter._schema_to_typeref(schema, "TestObj") + assert ref.annotation == "TestObj" + td = converter._typedicts[-1] + assert td.fields[0].name == "from_" + + def test_collision_raises(self): + """Codegen raises when two properties normalize to the same name.""" + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + schema = { + "type": "object", + "properties": { + "$kind": {"type": "string"}, + "_kind": {"type": "string"}, + }, + "required": ["$kind", "_kind"], + } + with pytest.raises(ValueError, match="both normalize to"): converter._schema_to_typeref(schema, "TestObj") def test_valid_schema_passes(self): @@ -605,8 +622,8 @@ def test_description_with_triple_quotes(self): assert '"""' not in _escape_docstring('bad """ doc') assert _escape_docstring('say """hello"""') == r"say \"\"\"hello\"\"\"" - def test_typedict_name_collision_raises(self): - """Two properties that generate the same TypedDict name are rejected.""" + def test_typedict_name_collision_deduplicates(self): + """Two properties that generate the same TypedDict name — first wins.""" from river.codegen.schema import SchemaConverter converter = SchemaConverter() @@ -623,8 +640,11 @@ def test_typedict_name_collision_raises(self): }, }, } - with pytest.raises(ValueError, match="already used"): - converter._schema_to_typeref(schema, "Prefix") + converter._schema_to_typeref(schema, "Prefix") + matching = [td for td in converter._typedicts if td.name == 
"PrefixFooBar"] + assert len(matching) == 1 + # First definition wins + assert matching[0].fields[0].name == "a" def test_empty_anyof_is_never(self): """anyOf with zero variants → Never.""" diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index 586abc95..3704a69d 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -1283,6 +1283,53 @@ def test_codec_adapter_rejects_wrong_ack_type(self): assert ok is False assert "ack" in result + def test_codec_adapter_rejects_missing_control_flags(self): + """CodecMessageAdapter rejects messages without controlFlags.""" + from river.codec import BinaryCodec, CodecMessageAdapter + + adapter = CodecMessageAdapter(BinaryCodec()) + raw = BinaryCodec().to_buffer( + { + "id": "m1", + "from": "s", + "to": "c", + "seq": 0, + "ack": 0, + "payload": {}, + "streamId": "st1", + # controlFlags omitted + } + ) + ok, result = adapter.from_buffer(raw) + assert ok is False + assert "controlFlags" in result + + def test_binary_codec_bigint_js_safe_range(self): + """Ints beyond JS MAX_SAFE_INTEGER use bigint extension.""" + from river.codec import BinaryCodec + + codec = BinaryCodec() + just_over = 2**53 + 1 + buf = codec.to_buffer({"n": just_over}) + decoded = codec.from_buffer(buf) + assert decoded["n"] == just_over + + # Value at the boundary should still be a normal int + at_boundary = 2**53 - 1 + buf2 = codec.to_buffer({"n": at_boundary}) + decoded2 = codec.from_buffer(buf2) + assert decoded2["n"] == at_boundary + + def test_binary_codec_negative_bigint_js_safe_range(self): + """Negative ints beyond -MAX_SAFE_INTEGER use bigint extension.""" + from river.codec import BinaryCodec + + codec = BinaryCodec() + just_under = -(2**53 + 1) + buf = codec.to_buffer({"n": just_under}) + decoded = codec.from_buffer(buf) + assert decoded["n"] == just_under + # ===================================================================== # Lifecycle / Cleanup Tests @@ -1612,6 +1659,55 @@ def 
test_invalid_message_destroys_session(self): assert len(errors) == 1 assert errors[0]["type"] == "invalid_message" + @pytest.mark.asyncio + async def test_malformed_handshake_status_closes_ws(self): + """Non-dict handshake status closes the socket cleanly.""" + from unittest.mock import AsyncMock + + from river.codec import BinaryCodec, CodecMessageAdapter + from river.session import Session, SessionState + from river.transport import WebSocketClientTransport + from river.types import TransportMessage + + transport = WebSocketClientTransport( + ws_url="ws://127.0.0.1:1", + client_id="client", + server_id="server", + codec=BinaryCodec(), + ) + codec_adapter = CodecMessageAdapter(BinaryCodec()) + session = Session("s1", "client", "server", codec_adapter) + session.state = SessionState.HANDSHAKING + transport.sessions["server"] = session + + # Build a handshake response with non-dict status + resp_msg = TransportMessage( + id="hs", + from_="server", + to="client", + seq=0, + ack=0, + payload={ + "type": "HANDSHAKE_RESP", + "status": "oops", # should be dict + }, + stream_id="heartbeat", + control_flags=1, + ) + ok, resp_bytes = codec_adapter.to_buffer(resp_msg) + assert ok + + ws = AsyncMock() + ws.recv = AsyncMock(return_value=resp_bytes) + ws.close = AsyncMock() + + await transport._do_handshake(session, ws, "server") + + # WebSocket should have been closed + ws.close.assert_awaited_once() + # Session should be deleted + assert "server" not in transport.sessions + def test_readable_broken_after_async_for_break(self): """Breaking out of async for marks readable as broken.""" from river.streams import Readable diff --git a/python-client/tests/test_schema_handshake.json b/python-client/tests/test_schema_handshake.json new file mode 100644 index 00000000..7b0ab22a --- /dev/null +++ b/python-client/tests/test_schema_handshake.json @@ -0,0 +1,142 @@ +{ + "services": { + "test": { + "procedures": { + "echo": { + "init": { + "type": "object", + "properties": { + "msg": { + 
"type": "string" + } + }, + "required": [ + "msg" + ] + }, + "output": { + "type": "object", + "properties": { + "response": { + "type": "string" + } + }, + "required": [ + "response" + ] + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "UNEXPECTED_DISCONNECT", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "INVALID_REQUEST", + "type": "string" + }, + "message": { + "type": "string" + }, + "extras": { + "type": "object", + "properties": { + "firstValidationErrors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "path", + "message" + ] + } + }, + "totalErrors": { + "type": "number" + } + }, + "required": [ + "firstValidationErrors", + "totalErrors" + ] + } + }, + "required": [ + "code", + "message" + ] + }, + { + "type": "object", + "properties": { + "code": { + "const": "CANCEL", + "type": "string" + }, + "message": { + "type": "string" + } + }, + "required": [ + "code", + "message" + ] + } + ] + }, + "type": "rpc" + } + } + } + }, + "handshakeSchema": { + "type": "object", + "properties": { + "token": { + "type": "string" + } + }, + "required": [ + "token" + ] + } +} \ No newline at end of file From db10d60b7725ad14d3891315491d3091811e6d3c Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Tue, 3 Mar 2026 14:44:56 -0800 Subject: [PATCH 27/29] fix --- python-client/river/client.py | 5 ++- python-client/river/codegen/__main__.py | 3 +- python-client/river/codegen/emitter.py | 16 +++---- python-client/river/codegen/schema.py | 6 ++- .../tests/generated_v2/_root_client.py | 1 + 
.../tests/generated_v2/test_client.py | 7 +-- .../tests/test_schema_handshake.json | 44 +++++-------------- 7 files changed, 30 insertions(+), 52 deletions(-) diff --git a/python-client/river/client.py b/python-client/river/client.py index 39dce15a..7098414d 100644 --- a/python-client/river/client.py +++ b/python-client/river/client.py @@ -10,10 +10,13 @@ import logging from collections.abc import Awaitable, Callable from dataclasses import dataclass -from typing import Any, Generic, Literal, TypeVar +from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar from typing_extensions import TypedDict +if TYPE_CHECKING: + from river.session import SessionOptions + from river.session import SessionState from river.streams import Readable, Writable from river.transport import WebSocketClientTransport diff --git a/python-client/river/codegen/__main__.py b/python-client/river/codegen/__main__.py index 9c46594b..78727f7d 100644 --- a/python-client/river/codegen/__main__.py +++ b/python-client/river/codegen/__main__.py @@ -38,7 +38,8 @@ def main(argv: list[str] | None = None) -> None: parser.add_argument( "--client-name", default=None, - help="Generate a root client class with this name that aggregates all services.", + help="Generate a root client class with this name " + "that aggregates all services.", ) args = parser.parse_args(argv) diff --git a/python-client/river/codegen/emitter.py b/python-client/river/codegen/emitter.py index 0896fd16..7a41de05 100644 --- a/python-client/river/codegen/emitter.py +++ b/python-client/river/codegen/emitter.py @@ -98,11 +98,13 @@ def _prepare_typedicts(ir: SchemaIR) -> list[dict]: for td in ir.typedicts: fields = [] for f in td.fields: - fields.append({ - "name": f.name, - "annotation": _field_annotation(f), - "description": f.description, - }) + fields.append( + { + "name": f.name, + "annotation": _field_annotation(f), + "description": f.description, + } + ) result.append( {"name": td.name, "description": td.description, 
"fields": fields} ) @@ -185,9 +187,7 @@ def _module_name(service_name: str) -> str: return _sanitize_identifier(service_name) -def render_root_client( - ir: SchemaIR, client_name: str, import_prefix: str -) -> str: +def render_root_client(ir: SchemaIR, client_name: str, import_prefix: str) -> str: imports = [] services = [] for svc in ir.services: diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index 13611233..28aef9d1 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -220,8 +220,10 @@ def _convert_procedure(self, svc_class: str, name: str, data: dict) -> Procedure # Init type and streaming input type. # Two schema formats: - # - v2 (serializeSchema): all procedures have "init"; stream/upload also have "input" - # - v1 (pid2 etc.): rpc/subscription use "input" as init; stream/upload have "init" + "input" + # - v2 (serializeSchema): all procedures have "init"; + # stream/upload also have "input" + # - v1 (pid2 etc.): rpc/subscription use "input" as init; + # stream/upload have "init" + "input" input_type = None if "init" in data: init_type = self._schema_to_typeref(data["init"], f"{prefix}Init") diff --git a/python-client/tests/generated_v2/_root_client.py b/python-client/tests/generated_v2/_root_client.py index 6ad791f4..fe144829 100644 --- a/python-client/tests/generated_v2/_root_client.py +++ b/python-client/tests/generated_v2/_root_client.py @@ -3,6 +3,7 @@ from __future__ import annotations from river.client import RiverClient + from .test_client import TestClient diff --git a/python-client/tests/generated_v2/test_client.py b/python-client/tests/generated_v2/test_client.py index 68be4f38..f92f2e6d 100644 --- a/python-client/tests/generated_v2/test_client.py +++ b/python-client/tests/generated_v2/test_client.py @@ -3,24 +3,19 @@ from __future__ import annotations import asyncio -from typing import Any from river.client import ( ErrResult, OkResult, RiverClient, - 
StreamResult, - SubscriptionResult, - UploadResult, ) +from ._errors import ProtocolError from ._types import ( TestEchoInit, TestEchoOutput, ) -from ._errors import ProtocolError - class TestClient: """Typed client for the ``test`` service.""" diff --git a/python-client/tests/test_schema_handshake.json b/python-client/tests/test_schema_handshake.json index 7b0ab22a..d2f90bce 100644 --- a/python-client/tests/test_schema_handshake.json +++ b/python-client/tests/test_schema_handshake.json @@ -10,9 +10,7 @@ "type": "string" } }, - "required": [ - "msg" - ] + "required": ["msg"] }, "output": { "type": "object", @@ -21,9 +19,7 @@ "type": "string" } }, - "required": [ - "response" - ] + "required": ["response"] }, "errors": { "anyOf": [ @@ -38,10 +34,7 @@ "type": "string" } }, - "required": [ - "code", - "message" - ] + "required": ["code", "message"] }, { "type": "object", @@ -54,10 +47,7 @@ "type": "string" } }, - "required": [ - "code", - "message" - ] + "required": ["code", "message"] }, { "type": "object", @@ -84,26 +74,17 @@ "type": "string" } }, - "required": [ - "path", - "message" - ] + "required": ["path", "message"] } }, "totalErrors": { "type": "number" } }, - "required": [ - "firstValidationErrors", - "totalErrors" - ] + "required": ["firstValidationErrors", "totalErrors"] } }, - "required": [ - "code", - "message" - ] + "required": ["code", "message"] }, { "type": "object", @@ -116,10 +97,7 @@ "type": "string" } }, - "required": [ - "code", - "message" - ] + "required": ["code", "message"] } ] }, @@ -135,8 +113,6 @@ "type": "string" } }, - "required": [ - "token" - ] + "required": ["token"] } -} \ No newline at end of file +} From 09c6887a500dd3211041a089151ee70a8fd56320 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Tue, 3 Mar 2026 17:18:36 -0800 Subject: [PATCH 28/29] fix --- python-client/tests/test_codegen.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/python-client/tests/test_codegen.py 
b/python-client/tests/test_codegen.py index 2ba8870d..0a287fb0 100644 --- a/python-client/tests/test_codegen.py +++ b/python-client/tests/test_codegen.py @@ -1180,8 +1180,8 @@ def test_allof_with_nested_objects(self): assert "Envelope" in td_names assert "EnvelopeMeta" in td_names - def test_allof_mixed_types_merges_objects(self): - """allOf with object + primitive → object properties still merged.""" + def test_allof_mixed_types_is_never(self): + """allOf with object + primitive → Never (contradictory).""" schema = { "allOf": [ { @@ -1193,11 +1193,7 @@ def test_allof_mixed_types_merges_objects(self): ] } ref, tds = self._convert(schema, "Mixed") - # Object properties are merged; primitive constraint is ignored - assert ref.annotation == "Mixed" - td = next(td for td in tds if td.name == "Mixed") - assert len(td.fields) == 1 - assert td.fields[0].name == "x" + assert ref.annotation == "Never" def test_allof_only_primitives_is_never(self): """allOf with only primitives → Never (contradictory intersection).""" From 3bd89c21bf23de6812c81b660115c52f90d9dd83 Mon Sep 17 00:00:00 2001 From: Jacky Zhao Date: Tue, 3 Mar 2026 22:31:00 -0800 Subject: [PATCH 29/29] more fix --- python-client/river/codec.py | 14 +- python-client/river/codegen/emitter.py | 6 +- python-client/river/codegen/schema.py | 16 + python-client/tests/test_codegen.py | 308 +++++++++++++++- python-client/tests/test_e2e.py | 33 ++ python-client/uv.lock | 479 +++++++++++++++++++++++++ 6 files changed, 849 insertions(+), 7 deletions(-) create mode 100644 python-client/uv.lock diff --git a/python-client/river/codec.py b/python-client/river/codec.py index 62d33738..16c914d1 100644 --- a/python-client/river/codec.py +++ b/python-client/river/codec.py @@ -37,7 +37,7 @@ class BinaryCodec(Codec): def to_buffer(self, obj: dict[str, Any]) -> bytes: import msgpack - return msgpack.packb(obj, use_bin_type=True, default=self._ext_encode) + return msgpack.packb(self._prepare(obj), use_bin_type=True) def from_buffer(self, 
buf: bytes) -> dict[str, Any]: import msgpack @@ -45,16 +45,20 @@ def from_buffer(self, buf: bytes) -> dict[str, Any]: return msgpack.unpackb(buf, raw=False, ext_hook=self._ext_decode) @staticmethod - def _ext_encode(obj: Any) -> Any: + def _prepare(obj: Any) -> Any: + """Walk *obj* and replace ints outside JS safe range with ExtType.""" import msgpack - if isinstance(obj, int) and ( + if isinstance(obj, dict): + return {k: BinaryCodec._prepare(v) for k, v in obj.items()} + if isinstance(obj, (list, tuple)): + return [BinaryCodec._prepare(v) for v in obj] + if isinstance(obj, int) and not isinstance(obj, bool) and ( obj > _MAX_SAFE_INTEGER or obj < _MIN_SAFE_INTEGER ): - # Encode as string in extension type 0 (matches TS BigInt ext) data = msgpack.packb(str(obj), use_bin_type=True) return msgpack.ExtType(_BIGINT_EXT_TYPE, data) - raise TypeError(f"Unknown type: {type(obj)}") + return obj @staticmethod def _ext_decode(code: int, data: bytes) -> Any: diff --git a/python-client/river/codegen/emitter.py b/python-client/river/codegen/emitter.py index 7a41de05..1a4c5454 100644 --- a/python-client/river/codegen/emitter.py +++ b/python-client/river/codegen/emitter.py @@ -31,7 +31,11 @@ def _escape_docstring(s: str) -> str: """Escape a string for use inside triple-quoted docstrings.""" - return s.replace("\\", "\\\\").replace('"""', r"\"\"\"") + s = s.replace("\\", "\\\\").replace('"""', r"\"\"\"") + # A trailing " would merge with the closing """ to form """", breaking syntax. 
+ if s.endswith('"'): + s = s[:-1] + r"\"" + return s _env.filters["docstring"] = _escape_docstring diff --git a/python-client/river/codegen/schema.py b/python-client/river/codegen/schema.py index 28aef9d1..ff85ad71 100644 --- a/python-client/river/codegen/schema.py +++ b/python-client/river/codegen/schema.py @@ -364,6 +364,22 @@ def _emit_typedict(self, td: TypedDictDef) -> None: def _convert_object(self, schema: dict, name: str) -> TypeRef: """Convert a JSON Schema object to a TypedDict and return a ref to it.""" + # patternProperties with a catch-all pattern → dict[str, ValueType] + pattern_props = schema.get("patternProperties", {}) + if pattern_props and not schema.get("properties"): + values = list(pattern_props.values()) + if len(values) == 1: + value_ref = self._schema_to_typeref(values[0], f"{name}Value") + val_ann = value_ref.annotation + else: + value_refs = [ + self._schema_to_typeref(v, f"{name}Value{i}") + for i, v in enumerate(values) + ] + unique = list(dict.fromkeys(r.annotation for r in value_refs)) + val_ann = unique[0] if len(unique) == 1 else " | ".join(unique) + return TypeRef(annotation=f"dict[str, {val_ann}]") + properties = schema.get("properties", {}) required_set = set(schema.get("required", [])) description = schema.get("description") diff --git a/python-client/tests/test_codegen.py b/python-client/tests/test_codegen.py index 0a287fb0..43ed1c10 100644 --- a/python-client/tests/test_codegen.py +++ b/python-client/tests/test_codegen.py @@ -620,7 +620,25 @@ def test_description_with_triple_quotes(self): from river.codegen.emitter import _escape_docstring assert '"""' not in _escape_docstring('bad """ doc') - assert _escape_docstring('say """hello"""') == r"say \"\"\"hello\"\"\"" + # Internal triple quotes are escaped; trailing " also escaped + result = _escape_docstring('say """hello"""') + assert '"""' not in result + assert not result.endswith('"') or result.endswith(r'\"') + + def test_description_ending_with_quote(self): + """Trailing 
quote is escaped to avoid merging with closing triple-quote.""" + from river.codegen.emitter import _escape_docstring + + result = _escape_docstring('example: "hello"') + # Must not end with unescaped " which would form """" with closing """ + assert not result.endswith('"') or result.endswith(r'\"') + assert result == r'example: "hello\"' + + def test_description_ending_without_quote(self): + """Non-quote endings are left unchanged.""" + from river.codegen.emitter import _escape_docstring + + assert _escape_docstring("normal text") == "normal text" def test_typedict_name_collision_deduplicates(self): """Two properties that generate the same TypedDict name — first wins.""" @@ -1357,3 +1375,291 @@ def test_service_with_complex_types(self): tags_field = next((f for f in init_td.fields if f.name == "tags"), None) assert tags_field is not None assert "list[" in tags_field.type_ref.annotation + + +class TestPatternProperties: + """Test codegen handling of patternProperties (dynamic dict keys).""" + + def _convert(self, schema: dict, name: str = "Test"): + from river.codegen.schema import SchemaConverter + + converter = SchemaConverter() + ref = converter._schema_to_typeref(schema, name) + return ref, converter._typedicts + + def test_simple_string_values(self): + """patternProperties with string values → dict[str, str].""" + schema = { + "type": "object", + "patternProperties": { + "^(.*)$": {"type": "string"}, + }, + } + ref, tds = self._convert(schema, "Env") + assert ref.annotation == "dict[str, str]" + assert len(tds) == 0 # no TypedDict emitted + + def test_object_values(self): + """patternProperties with object values → dict[str, TypedDict].""" + schema = { + "type": "object", + "patternProperties": { + "^(.*)$": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "version": {"type": "string"}, + }, + "required": ["name", "version"], + }, + }, + } + ref, tds = self._convert(schema, "Packages") + assert ref.annotation == "dict[str, 
PackagesValue]" + assert len(tds) == 1 + td = tds[0] + assert td.name == "PackagesValue" + field_names = {f.name for f in td.fields} + assert field_names == {"name", "version"} + + def test_nested_object_values(self): + """patternProperties where values have nested structure.""" + schema = { + "type": "object", + "patternProperties": { + "^(.*)$": { + "type": "object", + "properties": { + "all": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "version": {"type": "string"}, + }, + "required": ["name", "version"], + }, + }, + "required": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "version": {"type": "string"}, + }, + "required": ["name", "version"], + }, + }, + }, + }, + }, + } + ref, tds = self._convert(schema, "InstalledPackages") + assert ref.annotation == "dict[str, InstalledPackagesValue]" + + value_td = next(td for td in tds if td.name == "InstalledPackagesValue") + field_names = {f.name for f in value_td.fields} + assert "all" in field_names + assert "required" in field_names + + all_field = next(f for f in value_td.fields if f.name == "all") + assert "list[" in all_field.type_ref.annotation + + def test_integer_pattern_keys(self): + """patternProperties with integer-only pattern still becomes dict[str, ...].""" + schema = { + "type": "object", + "patternProperties": { + "^(0|[1-9][0-9]*)$": { + "type": "object", + "properties": {"id": {"type": "number"}}, + "required": ["id"], + }, + }, + } + ref, tds = self._convert(schema, "IntMap") + assert ref.annotation == "dict[str, IntMapValue]" + + def test_multiple_patterns_union(self): + """Multiple patternProperties produce a union value type.""" + schema = { + "type": "object", + "patternProperties": { + "^a": {"type": "string"}, + "^b": {"type": "integer"}, + }, + } + ref, tds = self._convert(schema, "Multi") + assert ref.annotation == "dict[str, str | int]" + assert len(tds) == 0 + + def 
test_multiple_patterns_same_type(self): + """Multiple patternProperties with the same type collapse to one.""" + schema = { + "type": "object", + "patternProperties": { + "^a": {"type": "string"}, + "^b": {"type": "string"}, + }, + } + ref, tds = self._convert(schema, "Same") + assert ref.annotation == "dict[str, str]" + + def test_properties_take_precedence(self): + """Object with properties (not patternProperties) → TypedDict, not dict.""" + schema = { + "type": "object", + "properties": { + "name": {"type": "string"}, + }, + "required": ["name"], + } + ref, tds = self._convert(schema, "Named") + assert ref.annotation == "Named" + assert len(tds) == 1 + + def test_pattern_properties_in_parent_object(self): + """patternProperties nested inside a normal object with properties.""" + schema = { + "type": "object", + "properties": { + "kind": {"const": "packages", "type": "string"}, + "packages": { + "type": "object", + "patternProperties": { + "^(.*)$": { + "type": "object", + "properties": { + "count": {"type": "integer"}, + }, + "required": ["count"], + }, + }, + }, + }, + "required": ["kind", "packages"], + } + ref, tds = self._convert(schema, "Output") + assert ref.annotation == "Output" + + output_td = next(td for td in tds if td.name == "Output") + packages_field = next(f for f in output_td.fields if f.name == "packages") + assert packages_field.type_ref.annotation == "dict[str, OutputPackagesValue]" + + def test_e2e_pattern_properties_codegen(self, tmp_path): + """End-to-end: schema with patternProperties → generated code → importable.""" + from river.codegen.emitter import write_generated_files + from river.codegen.schema import SchemaConverter + + schema = { + "services": { + "registry": { + "procedures": { + "listPackages": { + "type": "rpc", + "input": { + "type": "object", + "properties": { + "language": {"type": "string"}, + }, + "required": ["language"], + }, + "output": { + "type": "object", + "properties": { + "packages": { + "type": "object", + 
"patternProperties": { + "^(.*)$": { + "type": "object", + "properties": { + "all": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string" + }, + }, + "required": [ + "name", + "version", + ], + }, + }, + }, + }, + }, + }, + }, + "required": ["packages"], + }, + "errors": { + "anyOf": [ + { + "type": "object", + "properties": { + "code": { + "const": "UNCAUGHT_ERROR", + "type": "string", + }, + "message": {"type": "string"}, + }, + "required": ["code", "message"], + } + ], + }, + }, + }, + }, + }, + } + + converter = SchemaConverter() + ir = converter.convert(schema) + + output_dir = str(tmp_path / "generated") + os.makedirs(output_dir, exist_ok=True) + write_generated_files(ir, output_dir) + + # Verify generated types file is valid Python + types_path = os.path.join(output_dir, "_types.py") + assert os.path.exists(types_path) + with open(types_path) as f: + source = f.read() + + # Should contain dict[str, ...] not an empty TypedDict + assert "dict[str," in source + assert "pass" not in source or "pass" in source.split("class")[0] + + # Verify it compiles + compile(source, types_path, "exec") + + # Import and check the type at runtime + import importlib.util + + spec = importlib.util.spec_from_file_location("_types", types_path) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) # type: ignore + + # The output type should reference the value TypedDict via dict[str, ...] + assert hasattr(mod, "RegistryListPackagesOutput") + assert hasattr(mod, "RegistryListPackagesOutputPackagesValue") + assert hasattr(mod, "RegistryListPackagesOutputPackagesValueAllItem") + + # Check the annotation references dict[str, ...] 
with the value type + ann = str(mod.RegistryListPackagesOutput.__annotations__["packages"]) + assert "dict[str," in ann + assert "RegistryListPackagesOutputPackagesValue" in ann + + # The value type has an 'all' field with a list of items + value_ann = str( + mod.RegistryListPackagesOutputPackagesValue.__annotations__["all"] + ) + assert "list[" in value_ann diff --git a/python-client/tests/test_e2e.py b/python-client/tests/test_e2e.py index 3704a69d..852b0541 100644 --- a/python-client/tests/test_e2e.py +++ b/python-client/tests/test_e2e.py @@ -1330,6 +1330,39 @@ def test_binary_codec_negative_bigint_js_safe_range(self): decoded = codec.from_buffer(buf) assert decoded["n"] == just_under + def test_binary_codec_bigint_uses_ext_type(self): + """Large ints are encoded as msgpack ExtType, not native ints.""" + import msgpack + + from river.codec import BinaryCodec + + codec = BinaryCodec() + big = 2**53 + 1 + buf = codec.to_buffer({"n": big}) + # Unpack raw (without ext_hook) to verify the value is an ExtType + raw = msgpack.unpackb(buf, raw=False) + assert isinstance(raw["n"], msgpack.ExtType) + assert raw["n"].code == 0 + + def test_binary_codec_bigint_nested(self): + """Large ints nested in lists and dicts are encoded as ExtType.""" + from river.codec import BinaryCodec + + codec = BinaryCodec() + big = 2**53 + 1 + obj = {"a": [big], "b": {"c": big}} + decoded = codec.from_buffer(codec.to_buffer(obj)) + assert decoded["a"][0] == big + assert decoded["b"]["c"] == big + + def test_binary_codec_bool_not_treated_as_bigint(self): + """Booleans (subclass of int) should not be converted to ExtType.""" + from river.codec import BinaryCodec + + codec = BinaryCodec() + decoded = codec.from_buffer(codec.to_buffer({"flag": True})) + assert decoded["flag"] is True + # ===================================================================== # Lifecycle / Cleanup Tests diff --git a/python-client/uv.lock b/python-client/uv.lock new file mode 100644 index 00000000..94d56717 --- 
/dev/null +++ b/python-client/uv.lock @@ -0,0 +1,479 @@ +version = 1 +requires-python = ">=3.10" + +[[package]] +name = "backports-asyncio-runner" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740 }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865 }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484 }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899 }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e8/4b/3541d44f3937ba468b75da9eebcae497dcf67adb65caa16760b0a6807ebb/markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559", size = 11631 }, + { url = "https://files.pythonhosted.org/packages/98/1b/fbd8eed11021cabd9226c37342fa6ca4e8a98d8188a8d9b66740494960e4/markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419", size = 12057 }, + { url = "https://files.pythonhosted.org/packages/40/01/e560d658dc0bb8ab762670ece35281dec7b6c1b33f5fbc09ebb57a185519/markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695", size = 22050 }, + { url = "https://files.pythonhosted.org/packages/af/cd/ce6e848bbf2c32314c9b237839119c5a564a59725b53157c856e90937b7a/markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591", size = 20681 }, + { url = "https://files.pythonhosted.org/packages/c9/2a/b5c12c809f1c3045c4d580b035a743d12fcde53cf685dbc44660826308da/markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c", size = 20705 }, + { url = "https://files.pythonhosted.org/packages/cf/e3/9427a68c82728d0a88c50f890d0fc072a1484de2f3ac1ad0bfc1a7214fd5/markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f", size = 21524 }, + { url = "https://files.pythonhosted.org/packages/bc/36/23578f29e9e582a4d0278e009b38081dbe363c5e7165113fad546918a232/markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6", size = 20282 }, + { url = "https://files.pythonhosted.org/packages/56/21/dca11354e756ebd03e036bd8ad58d6d7168c80ce1fe5e75218e4945cbab7/markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1", size = 20745 }, + { url = "https://files.pythonhosted.org/packages/87/99/faba9369a7ad6e4d10b6a5fbf71fa2a188fe4a593b15f0963b73859a1bbd/markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa", size = 14571 }, + { url = "https://files.pythonhosted.org/packages/d6/25/55dc3ab959917602c96985cb1253efaa4ff42f71194bddeb61eb7278b8be/markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8", size = 15056 }, + { url = "https://files.pythonhosted.org/packages/d0/9e/0a02226640c255d1da0b8d12e24ac2aa6734da68bff14c05dd53b94a0fc3/markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1", size = 13932 }, + { url = "https://files.pythonhosted.org/packages/08/db/fefacb2136439fc8dd20e797950e749aa1f4997ed584c62cfb8ef7c2be0e/markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad", size = 11631 }, + { url = 
"https://files.pythonhosted.org/packages/e1/2e/5898933336b61975ce9dc04decbc0a7f2fee78c30353c5efba7f2d6ff27a/markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a", size = 12058 }, + { url = "https://files.pythonhosted.org/packages/1d/09/adf2df3699d87d1d8184038df46a9c80d78c0148492323f4693df54e17bb/markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50", size = 24287 }, + { url = "https://files.pythonhosted.org/packages/30/ac/0273f6fcb5f42e314c6d8cd99effae6a5354604d461b8d392b5ec9530a54/markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf", size = 22940 }, + { url = "https://files.pythonhosted.org/packages/19/ae/31c1be199ef767124c042c6c3e904da327a2f7f0cd63a0337e1eca2967a8/markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f", size = 21887 }, + { url = "https://files.pythonhosted.org/packages/b2/76/7edcab99d5349a4532a459e1fe64f0b0467a3365056ae550d3bcf3f79e1e/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a", size = 23692 }, + { url = "https://files.pythonhosted.org/packages/a4/28/6e74cdd26d7514849143d69f0bf2399f929c37dc2b31e6829fd2045b2765/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115", size = 21471 }, + { url = "https://files.pythonhosted.org/packages/62/7e/a145f36a5c2945673e590850a6f8014318d5577ed7e5920a4b3448e0865d/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a", size = 
22923 }, + { url = "https://files.pythonhosted.org/packages/0f/62/d9c46a7f5c9adbeeeda52f5b8d802e1094e9717705a645efc71b0913a0a8/markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19", size = 14572 }, + { url = "https://files.pythonhosted.org/packages/83/8a/4414c03d3f891739326e1783338e48fb49781cc915b2e0ee052aa490d586/markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01", size = 15077 }, + { url = "https://files.pythonhosted.org/packages/35/73/893072b42e6862f319b5207adc9ae06070f095b358655f077f69a35601f0/markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c", size = 13876 }, + { url = "https://files.pythonhosted.org/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615 }, + { url = "https://files.pythonhosted.org/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020 }, + { url = "https://files.pythonhosted.org/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332 }, + { url = "https://files.pythonhosted.org/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947 }, + { url = 
"https://files.pythonhosted.org/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962 }, + { url = "https://files.pythonhosted.org/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760 }, + { url = "https://files.pythonhosted.org/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529 }, + { url = "https://files.pythonhosted.org/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015 }, + { url = "https://files.pythonhosted.org/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540 }, + { url = "https://files.pythonhosted.org/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105 }, + { url = "https://files.pythonhosted.org/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906 }, + { url = 
"https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622 }, + { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029 }, + { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374 }, + { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980 }, + { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990 }, + { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784 }, + { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 
21588 }, + { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041 }, + { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543 }, + { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113 }, + { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911 }, + { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658 }, + { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066 }, + { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639 }, + { url = 
"https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569 }, + { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284 }, + { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801 }, + { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769 }, + { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642 }, + { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612 }, + { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200 }, + { url = 
"https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973 }, + { url = "https://files.pythonhosted.org/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619 }, + { url = "https://files.pythonhosted.org/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029 }, + { url = "https://files.pythonhosted.org/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408 }, + { url = "https://files.pythonhosted.org/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005 }, + { url = "https://files.pythonhosted.org/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048 }, + { url = "https://files.pythonhosted.org/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821 }, + 
{ url = "https://files.pythonhosted.org/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606 }, + { url = "https://files.pythonhosted.org/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043 }, + { url = "https://files.pythonhosted.org/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747 }, + { url = "https://files.pythonhosted.org/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341 }, + { url = "https://files.pythonhosted.org/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073 }, + { url = "https://files.pythonhosted.org/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661 }, + { url = "https://files.pythonhosted.org/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069 }, + { url = 
"https://files.pythonhosted.org/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670 }, + { url = "https://files.pythonhosted.org/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598 }, + { url = "https://files.pythonhosted.org/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261 }, + { url = "https://files.pythonhosted.org/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835 }, + { url = "https://files.pythonhosted.org/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733 }, + { url = "https://files.pythonhosted.org/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672 }, + { url = "https://files.pythonhosted.org/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 
14819 }, + { url = "https://files.pythonhosted.org/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426 }, + { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146 }, +] + +[[package]] +name = "msgpack" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4d/f2/bfb55a6236ed8725a96b0aa3acbd0ec17588e6a2c3b62a93eb513ed8783f/msgpack-1.1.2.tar.gz", hash = "sha256:3b60763c1373dd60f398488069bcdc703cd08a711477b5d480eecc9f9626f47e", size = 173581 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/a2/3b68a9e769db68668b25c6108444a35f9bd163bb848c0650d516761a59c0/msgpack-1.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0051fffef5a37ca2cd16978ae4f0aef92f164df86823871b5162812bebecd8e2", size = 81318 }, + { url = "https://files.pythonhosted.org/packages/5b/e1/2b720cc341325c00be44e1ed59e7cfeae2678329fbf5aa68f5bda57fe728/msgpack-1.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a605409040f2da88676e9c9e5853b3449ba8011973616189ea5ee55ddbc5bc87", size = 83786 }, + { url = "https://files.pythonhosted.org/packages/71/e5/c2241de64bfceac456b140737812a2ab310b10538a7b34a1d393b748e095/msgpack-1.1.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b696e83c9f1532b4af884045ba7f3aa741a63b2bc22617293a2c6a7c645f251", size = 398240 }, + { url = "https://files.pythonhosted.org/packages/b7/09/2a06956383c0fdebaef5aa9246e2356776f12ea6f2a44bd1368abf0e46c4/msgpack-1.1.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:365c0bbe981a27d8932da71af63ef86acc59ed5c01ad929e09a0b88c6294e28a", size = 406070 }, + { url = "https://files.pythonhosted.org/packages/0e/74/2957703f0e1ef20637d6aead4fbb314330c26f39aa046b348c7edcf6ca6b/msgpack-1.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:41d1a5d875680166d3ac5c38573896453bbbea7092936d2e107214daf43b1d4f", size = 393403 }, + { url = "https://files.pythonhosted.org/packages/a5/09/3bfc12aa90f77b37322fc33e7a8a7c29ba7c8edeadfa27664451801b9860/msgpack-1.1.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:354e81bcdebaab427c3df4281187edc765d5d76bfb3a7c125af9da7a27e8458f", size = 398947 }, + { url = "https://files.pythonhosted.org/packages/4b/4f/05fcebd3b4977cb3d840f7ef6b77c51f8582086de5e642f3fefee35c86fc/msgpack-1.1.2-cp310-cp310-win32.whl", hash = "sha256:e64c8d2f5e5d5fda7b842f55dec6133260ea8f53c4257d64494c534f306bf7a9", size = 64769 }, + { url = "https://files.pythonhosted.org/packages/d0/3e/b4547e3a34210956382eed1c85935fff7e0f9b98be3106b3745d7dec9c5e/msgpack-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:db6192777d943bdaaafb6ba66d44bf65aa0e9c5616fa1d2da9bb08828c6b39aa", size = 71293 }, + { url = "https://files.pythonhosted.org/packages/2c/97/560d11202bcd537abca693fd85d81cebe2107ba17301de42b01ac1677b69/msgpack-1.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e86a607e558d22985d856948c12a3fa7b42efad264dca8a3ebbcfa2735d786c", size = 82271 }, + { url = "https://files.pythonhosted.org/packages/83/04/28a41024ccbd67467380b6fb440ae916c1e4f25e2cd4c63abe6835ac566e/msgpack-1.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:283ae72fc89da59aa004ba147e8fc2f766647b1251500182fac0350d8af299c0", size = 84914 }, + { url = "https://files.pythonhosted.org/packages/71/46/b817349db6886d79e57a966346cf0902a426375aadc1e8e7a86a75e22f19/msgpack-1.1.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:61c8aa3bd513d87c72ed0b37b53dd5c5a0f58f2ff9f26e1555d3bd7948fb7296", size = 416962 }, + { 
url = "https://files.pythonhosted.org/packages/da/e0/6cc2e852837cd6086fe7d8406af4294e66827a60a4cf60b86575a4a65ca8/msgpack-1.1.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:454e29e186285d2ebe65be34629fa0e8605202c60fbc7c4c650ccd41870896ef", size = 426183 }, + { url = "https://files.pythonhosted.org/packages/25/98/6a19f030b3d2ea906696cedd1eb251708e50a5891d0978b012cb6107234c/msgpack-1.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7bc8813f88417599564fafa59fd6f95be417179f76b40325b500b3c98409757c", size = 411454 }, + { url = "https://files.pythonhosted.org/packages/b7/cd/9098fcb6adb32187a70b7ecaabf6339da50553351558f37600e53a4a2a23/msgpack-1.1.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bafca952dc13907bdfdedfc6a5f579bf4f292bdd506fadb38389afa3ac5b208e", size = 422341 }, + { url = "https://files.pythonhosted.org/packages/e6/ae/270cecbcf36c1dc85ec086b33a51a4d7d08fc4f404bdbc15b582255d05ff/msgpack-1.1.2-cp311-cp311-win32.whl", hash = "sha256:602b6740e95ffc55bfb078172d279de3773d7b7db1f703b2f1323566b878b90e", size = 64747 }, + { url = "https://files.pythonhosted.org/packages/2a/79/309d0e637f6f37e83c711f547308b91af02b72d2326ddd860b966080ef29/msgpack-1.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:d198d275222dc54244bf3327eb8cbe00307d220241d9cec4d306d49a44e85f68", size = 71633 }, + { url = "https://files.pythonhosted.org/packages/73/4d/7c4e2b3d9b1106cd0aa6cb56cc57c6267f59fa8bfab7d91df5adc802c847/msgpack-1.1.2-cp311-cp311-win_arm64.whl", hash = "sha256:86f8136dfa5c116365a8a651a7d7484b65b13339731dd6faebb9a0242151c406", size = 64755 }, + { url = "https://files.pythonhosted.org/packages/ad/bd/8b0d01c756203fbab65d265859749860682ccd2a59594609aeec3a144efa/msgpack-1.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:70a0dff9d1f8da25179ffcf880e10cf1aad55fdb63cd59c9a49a1b82290062aa", size = 81939 }, + { url = 
"https://files.pythonhosted.org/packages/34/68/ba4f155f793a74c1483d4bdef136e1023f7bcba557f0db4ef3db3c665cf1/msgpack-1.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:446abdd8b94b55c800ac34b102dffd2f6aa0ce643c55dfc017ad89347db3dbdb", size = 85064 }, + { url = "https://files.pythonhosted.org/packages/f2/60/a064b0345fc36c4c3d2c743c82d9100c40388d77f0b48b2f04d6041dbec1/msgpack-1.1.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c63eea553c69ab05b6747901b97d620bb2a690633c77f23feb0c6a947a8a7b8f", size = 417131 }, + { url = "https://files.pythonhosted.org/packages/65/92/a5100f7185a800a5d29f8d14041f61475b9de465ffcc0f3b9fba606e4505/msgpack-1.1.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:372839311ccf6bdaf39b00b61288e0557916c3729529b301c52c2d88842add42", size = 427556 }, + { url = "https://files.pythonhosted.org/packages/f5/87/ffe21d1bf7d9991354ad93949286f643b2bb6ddbeab66373922b44c3b8cc/msgpack-1.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2929af52106ca73fcb28576218476ffbb531a036c2adbcf54a3664de124303e9", size = 404920 }, + { url = "https://files.pythonhosted.org/packages/ff/41/8543ed2b8604f7c0d89ce066f42007faac1eaa7d79a81555f206a5cdb889/msgpack-1.1.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be52a8fc79e45b0364210eef5234a7cf8d330836d0a64dfbb878efa903d84620", size = 415013 }, + { url = "https://files.pythonhosted.org/packages/41/0d/2ddfaa8b7e1cee6c490d46cb0a39742b19e2481600a7a0e96537e9c22f43/msgpack-1.1.2-cp312-cp312-win32.whl", hash = "sha256:1fff3d825d7859ac888b0fbda39a42d59193543920eda9d9bea44d958a878029", size = 65096 }, + { url = "https://files.pythonhosted.org/packages/8c/ec/d431eb7941fb55a31dd6ca3404d41fbb52d99172df2e7707754488390910/msgpack-1.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:1de460f0403172cff81169a30b9a92b260cb809c4cb7e2fc79ae8d0510c78b6b", size = 72708 }, + { url = 
"https://files.pythonhosted.org/packages/c5/31/5b1a1f70eb0e87d1678e9624908f86317787b536060641d6798e3cf70ace/msgpack-1.1.2-cp312-cp312-win_arm64.whl", hash = "sha256:be5980f3ee0e6bd44f3a9e9dea01054f175b50c3e6cdb692bc9424c0bbb8bf69", size = 64119 }, + { url = "https://files.pythonhosted.org/packages/6b/31/b46518ecc604d7edf3a4f94cb3bf021fc62aa301f0cb849936968164ef23/msgpack-1.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4efd7b5979ccb539c221a4c4e16aac1a533efc97f3b759bb5a5ac9f6d10383bf", size = 81212 }, + { url = "https://files.pythonhosted.org/packages/92/dc/c385f38f2c2433333345a82926c6bfa5ecfff3ef787201614317b58dd8be/msgpack-1.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:42eefe2c3e2af97ed470eec850facbe1b5ad1d6eacdbadc42ec98e7dcf68b4b7", size = 84315 }, + { url = "https://files.pythonhosted.org/packages/d3/68/93180dce57f684a61a88a45ed13047558ded2be46f03acb8dec6d7c513af/msgpack-1.1.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1fdf7d83102bf09e7ce3357de96c59b627395352a4024f6e2458501f158bf999", size = 412721 }, + { url = "https://files.pythonhosted.org/packages/5d/ba/459f18c16f2b3fc1a1ca871f72f07d70c07bf768ad0a507a698b8052ac58/msgpack-1.1.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fac4be746328f90caa3cd4bc67e6fe36ca2bf61d5c6eb6d895b6527e3f05071e", size = 424657 }, + { url = "https://files.pythonhosted.org/packages/38/f8/4398c46863b093252fe67368b44edc6c13b17f4e6b0e4929dbf0bdb13f23/msgpack-1.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fffee09044073e69f2bad787071aeec727183e7580443dfeb8556cbf1978d162", size = 402668 }, + { url = "https://files.pythonhosted.org/packages/28/ce/698c1eff75626e4124b4d78e21cca0b4cc90043afb80a507626ea354ab52/msgpack-1.1.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5928604de9b032bc17f5099496417f113c45bc6bc21b5c6920caf34b3c428794", size = 419040 }, + { url = 
"https://files.pythonhosted.org/packages/67/32/f3cd1667028424fa7001d82e10ee35386eea1408b93d399b09fb0aa7875f/msgpack-1.1.2-cp313-cp313-win32.whl", hash = "sha256:a7787d353595c7c7e145e2331abf8b7ff1e6673a6b974ded96e6d4ec09f00c8c", size = 65037 }, + { url = "https://files.pythonhosted.org/packages/74/07/1ed8277f8653c40ebc65985180b007879f6a836c525b3885dcc6448ae6cb/msgpack-1.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:a465f0dceb8e13a487e54c07d04ae3ba131c7c5b95e2612596eafde1dccf64a9", size = 72631 }, + { url = "https://files.pythonhosted.org/packages/e5/db/0314e4e2db56ebcf450f277904ffd84a7988b9e5da8d0d61ab2d057df2b6/msgpack-1.1.2-cp313-cp313-win_arm64.whl", hash = "sha256:e69b39f8c0aa5ec24b57737ebee40be647035158f14ed4b40e6f150077e21a84", size = 64118 }, + { url = "https://files.pythonhosted.org/packages/22/71/201105712d0a2ff07b7873ed3c220292fb2ea5120603c00c4b634bcdafb3/msgpack-1.1.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e23ce8d5f7aa6ea6d2a2b326b4ba46c985dbb204523759984430db7114f8aa00", size = 81127 }, + { url = "https://files.pythonhosted.org/packages/1b/9f/38ff9e57a2eade7bf9dfee5eae17f39fc0e998658050279cbb14d97d36d9/msgpack-1.1.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:6c15b7d74c939ebe620dd8e559384be806204d73b4f9356320632d783d1f7939", size = 84981 }, + { url = "https://files.pythonhosted.org/packages/8e/a9/3536e385167b88c2cc8f4424c49e28d49a6fc35206d4a8060f136e71f94c/msgpack-1.1.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99e2cb7b9031568a2a5c73aa077180f93dd2e95b4f8d3b8e14a73ae94a9e667e", size = 411885 }, + { url = "https://files.pythonhosted.org/packages/2f/40/dc34d1a8d5f1e51fc64640b62b191684da52ca469da9cd74e84936ffa4a6/msgpack-1.1.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:180759d89a057eab503cf62eeec0aa61c4ea1200dee709f3a8e9397dbb3b6931", size = 419658 }, + { url = 
"https://files.pythonhosted.org/packages/3b/ef/2b92e286366500a09a67e03496ee8b8ba00562797a52f3c117aa2b29514b/msgpack-1.1.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:04fb995247a6e83830b62f0b07bf36540c213f6eac8e851166d8d86d83cbd014", size = 403290 }, + { url = "https://files.pythonhosted.org/packages/78/90/e0ea7990abea5764e4655b8177aa7c63cdfa89945b6e7641055800f6c16b/msgpack-1.1.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8e22ab046fa7ede9e36eeb4cfad44d46450f37bb05d5ec482b02868f451c95e2", size = 415234 }, + { url = "https://files.pythonhosted.org/packages/72/4e/9390aed5db983a2310818cd7d3ec0aecad45e1f7007e0cda79c79507bb0d/msgpack-1.1.2-cp314-cp314-win32.whl", hash = "sha256:80a0ff7d4abf5fecb995fcf235d4064b9a9a8a40a3ab80999e6ac1e30b702717", size = 66391 }, + { url = "https://files.pythonhosted.org/packages/6e/f1/abd09c2ae91228c5f3998dbd7f41353def9eac64253de3c8105efa2082f7/msgpack-1.1.2-cp314-cp314-win_amd64.whl", hash = "sha256:9ade919fac6a3e7260b7f64cea89df6bec59104987cbea34d34a2fa15d74310b", size = 73787 }, + { url = "https://files.pythonhosted.org/packages/6a/b0/9d9f667ab48b16ad4115c1935d94023b82b3198064cb84a123e97f7466c1/msgpack-1.1.2-cp314-cp314-win_arm64.whl", hash = "sha256:59415c6076b1e30e563eb732e23b994a61c159cec44deaf584e5cc1dd662f2af", size = 66453 }, + { url = "https://files.pythonhosted.org/packages/16/67/93f80545eb1792b61a217fa7f06d5e5cb9e0055bed867f43e2b8e012e137/msgpack-1.1.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:897c478140877e5307760b0ea66e0932738879e7aa68144d9b78ea4c8302a84a", size = 85264 }, + { url = "https://files.pythonhosted.org/packages/87/1c/33c8a24959cf193966ef11a6f6a2995a65eb066bd681fd085afd519a57ce/msgpack-1.1.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a668204fa43e6d02f89dbe79a30b0d67238d9ec4c5bd8a940fc3a004a47b721b", size = 89076 }, + { url = 
"https://files.pythonhosted.org/packages/fc/6b/62e85ff7193663fbea5c0254ef32f0c77134b4059f8da89b958beb7696f3/msgpack-1.1.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5559d03930d3aa0f3aacb4c42c776af1a2ace2611871c84a75afe436695e6245", size = 435242 }, + { url = "https://files.pythonhosted.org/packages/c1/47/5c74ecb4cc277cf09f64e913947871682ffa82b3b93c8dad68083112f412/msgpack-1.1.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:70c5a7a9fea7f036b716191c29047374c10721c389c21e9ffafad04df8c52c90", size = 432509 }, + { url = "https://files.pythonhosted.org/packages/24/a4/e98ccdb56dc4e98c929a3f150de1799831c0a800583cde9fa022fa90602d/msgpack-1.1.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f2cb069d8b981abc72b41aea1c580ce92d57c673ec61af4c500153a626cb9e20", size = 415957 }, + { url = "https://files.pythonhosted.org/packages/da/28/6951f7fb67bc0a4e184a6b38ab71a92d9ba58080b27a77d3e2fb0be5998f/msgpack-1.1.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d62ce1f483f355f61adb5433ebfd8868c5f078d1a52d042b0a998682b4fa8c27", size = 422910 }, + { url = "https://files.pythonhosted.org/packages/f0/03/42106dcded51f0a0b5284d3ce30a671e7bd3f7318d122b2ead66ad289fed/msgpack-1.1.2-cp314-cp314t-win32.whl", hash = "sha256:1d1418482b1ee984625d88aa9585db570180c286d942da463533b238b98b812b", size = 75197 }, + { url = "https://files.pythonhosted.org/packages/15/86/d0071e94987f8db59d4eeb386ddc64d0bb9b10820a8d82bcd3e53eeb2da6/msgpack-1.1.2-cp314-cp314t-win_amd64.whl", hash = "sha256:5a46bf7e831d09470ad92dff02b8b1ac92175ca36b087f904a0519857c6be3ff", size = 85772 }, + { url = "https://files.pythonhosted.org/packages/81/f2/08ace4142eb281c12701fc3b93a10795e4d4dc7f753911d836675050f886/msgpack-1.1.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d99ef64f349d5ec3293688e91486c5fdb925ed03807f64d98d205d2713c60b46", size = 70868 }, +] + +[[package]] +name = "opentelemetry-api" +version = 
"1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356 }, +] + +[[package]] +name = "packaging" +version = "26.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366 }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538 }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217 }, +] + +[[package]] +name = "pytest" +version = "9.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801 }, +] + +[[package]] +name = "pytest-asyncio" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" }, + { name = "pytest" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = 
"sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075 }, +] + +[[package]] +name = "river-client" +version = "0.1.0" +source = { editable = "." } +dependencies = [ + { name = "jinja2" }, + { name = "msgpack" }, + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] + +[package.optional-dependencies] +dev = [ + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "jinja2", specifier = ">=3.0" }, + { name = "msgpack", specifier = ">=1.0" }, + { name = "opentelemetry-api", specifier = ">=1.0" }, + { name = "pytest", marker = "extra == 'dev'", specifier = ">=8.0" }, + { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.23" }, + { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.4" }, + { name = "typing-extensions", specifier = ">=4.0" }, + { name = "websockets", specifier = ">=12.0" }, +] + +[[package]] +name = "ruff" +version = "0.15.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/da/31/d6e536cdebb6568ae75a7f00e4b4819ae0ad2640c3604c305a0428680b0c/ruff-0.15.4.tar.gz", hash = "sha256:3412195319e42d634470cc97aa9803d07e9d5c9223b99bcb1518f0c725f26ae1", size = 4569550 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/82/c11a03cfec3a4d26a0ea1e571f0f44be5993b923f905eeddfc397c13d360/ruff-0.15.4-py3-none-linux_armv6l.whl", hash = "sha256:a1810931c41606c686bae8b5b9a8072adac2f611bb433c0ba476acba17a332e0", size = 10453333 }, + { url = 
"https://files.pythonhosted.org/packages/ce/5d/6a1f271f6e31dffb31855996493641edc3eef8077b883eaf007a2f1c2976/ruff-0.15.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5a1632c66672b8b4d3e1d1782859e98d6e0b4e70829530666644286600a33992", size = 10853356 }, + { url = "https://files.pythonhosted.org/packages/b1/d8/0fab9f8842b83b1a9c2bf81b85063f65e93fb512e60effa95b0be49bfc54/ruff-0.15.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:a4386ba2cd6c0f4ff75252845906acc7c7c8e1ac567b7bc3d373686ac8c222ba", size = 10187434 }, + { url = "https://files.pythonhosted.org/packages/85/cc/cc220fd9394eff5db8d94dec199eec56dd6c9f3651d8869d024867a91030/ruff-0.15.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2496488bdfd3732747558b6f95ae427ff066d1fcd054daf75f5a50674411e75", size = 10535456 }, + { url = "https://files.pythonhosted.org/packages/fa/0f/bced38fa5cf24373ec767713c8e4cadc90247f3863605fb030e597878661/ruff-0.15.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3f1c4893841ff2d54cbda1b2860fa3260173df5ddd7b95d370186f8a5e66a4ac", size = 10287772 }, + { url = "https://files.pythonhosted.org/packages/2b/90/58a1802d84fed15f8f281925b21ab3cecd813bde52a8ca033a4de8ab0e7a/ruff-0.15.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:820b8766bd65503b6c30aaa6331e8ef3a6e564f7999c844e9a547c40179e440a", size = 11049051 }, + { url = "https://files.pythonhosted.org/packages/d2/ac/b7ad36703c35f3866584564dc15f12f91cb1a26a897dc2fd13d7cb3ae1af/ruff-0.15.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9fb74bab47139c1751f900f857fa503987253c3ef89129b24ed375e72873e85", size = 11890494 }, + { url = "https://files.pythonhosted.org/packages/93/3d/3eb2f47a39a8b0da99faf9c54d3eb24720add1e886a5309d4d1be73a6380/ruff-0.15.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f80c98765949c518142b3a50a5db89343aa90f2c2bf7799de9986498ae6176db", size = 11326221 }, + { url = 
"https://files.pythonhosted.org/packages/ff/90/bf134f4c1e5243e62690e09d63c55df948a74084c8ac3e48a88468314da6/ruff-0.15.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451a2e224151729b3b6c9ffb36aed9091b2996fe4bdbd11f47e27d8f2e8888ec", size = 11168459 }, + { url = "https://files.pythonhosted.org/packages/b5/e5/a64d27688789b06b5d55162aafc32059bb8c989c61a5139a36e1368285eb/ruff-0.15.4-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:a8f157f2e583c513c4f5f896163a93198297371f34c04220daf40d133fdd4f7f", size = 11104366 }, + { url = "https://files.pythonhosted.org/packages/f1/f6/32d1dcb66a2559763fc3027bdd65836cad9eb09d90f2ed6a63d8e9252b02/ruff-0.15.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:917cc68503357021f541e69b35361c99387cdbbf99bd0ea4aa6f28ca99ff5338", size = 10510887 }, + { url = "https://files.pythonhosted.org/packages/ff/92/22d1ced50971c5b6433aed166fcef8c9343f567a94cf2b9d9089f6aa80fe/ruff-0.15.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:e9737c8161da79fd7cfec19f1e35620375bd8b2a50c3e77fa3d2c16f574105cc", size = 10285939 }, + { url = "https://files.pythonhosted.org/packages/e6/f4/7c20aec3143837641a02509a4668fb146a642fd1211846634edc17eb5563/ruff-0.15.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:291258c917539e18f6ba40482fe31d6f5ac023994ee11d7bdafd716f2aab8a68", size = 10765471 }, + { url = "https://files.pythonhosted.org/packages/d0/09/6d2f7586f09a16120aebdff8f64d962d7c4348313c77ebb29c566cefc357/ruff-0.15.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3f83c45911da6f2cd5936c436cf86b9f09f09165f033a99dcf7477e34041cbc3", size = 11263382 }, + { url = "https://files.pythonhosted.org/packages/1b/fa/2ef715a1cd329ef47c1a050e10dee91a9054b7ce2fcfdd6a06d139afb7ec/ruff-0.15.4-py3-none-win32.whl", hash = "sha256:65594a2d557d4ee9f02834fcdf0a28daa8b3b9f6cb2cb93846025a36db47ef22", size = 10506664 }, + { url = 
"https://files.pythonhosted.org/packages/d0/a8/c688ef7e29983976820d18710f955751d9f4d4eb69df658af3d006e2ba3e/ruff-0.15.4-py3-none-win_amd64.whl", hash = "sha256:04196ad44f0df220c2ece5b0e959c2f37c777375ec744397d21d15b50a75264f", size = 11651048 }, + { url = "https://files.pythonhosted.org/packages/3e/0a/9e1be9035b37448ce2e68c978f0591da94389ade5a5abafa4cf99985d1b2/ruff-0.15.4-py3-none-win_arm64.whl", hash = "sha256:60d5177e8cfc70e51b9c5fad936c634872a74209f934c1e79107d11787ad5453", size = 10966776 }, +] + +[[package]] +name = "tomli" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663 }, + { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469 }, + { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039 }, + { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007 }, + { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875 }, + { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271 }, + { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770 }, + { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626 }, + { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842 }, + { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894 }, + { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053 }, + { url = 
"https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481 }, + { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720 }, + { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014 }, + { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820 }, + { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712 }, + { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296 }, + { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553 }, + { url = 
"https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915 }, + { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038 }, + { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245 }, + { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335 }, + { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962 }, + { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396 }, + { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530 }, + { url = 
"https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227 }, + { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748 }, + { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725 }, + { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901 }, + { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375 }, + { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639 }, + { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897 }, + { url = 
"https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697 }, + { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567 }, + { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556 }, + { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014 }, + { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339 }, + { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490 }, + { url = "https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398 }, + { url = 
"https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515 }, + { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806 }, + { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340 }, + { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106 }, + { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504 }, + { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561 }, + { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477 }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614 }, +] + +[[package]] +name = "websockets" +version = "16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/74/221f58decd852f4b59cc3354cccaf87e8ef695fede361d03dc9a7396573b/websockets-16.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04cdd5d2d1dacbad0a7bf36ccbcd3ccd5a30ee188f2560b7a62a30d14107b31a", size = 177343 }, + { url = "https://files.pythonhosted.org/packages/19/0f/22ef6107ee52ab7f0b710d55d36f5a5d3ef19e8a205541a6d7ffa7994e5a/websockets-16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8ff32bb86522a9e5e31439a58addbb0166f0204d64066fb955265c4e214160f0", size = 175021 }, + { url = "https://files.pythonhosted.org/packages/10/40/904a4cb30d9b61c0e278899bf36342e9b0208eb3c470324a9ecbaac2a30f/websockets-16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:583b7c42688636f930688d712885cf1531326ee05effd982028212ccc13e5957", size = 175320 }, + { url = "https://files.pythonhosted.org/packages/9d/2f/4b3ca7e106bc608744b1cdae041e005e446124bebb037b18799c2d356864/websockets-16.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7d837379b647c0c4c2355c2499723f82f1635fd2c26510e1f587d89bc2199e72", size = 
183815 }, + { url = "https://files.pythonhosted.org/packages/86/26/d40eaa2a46d4302becec8d15b0fc5e45bdde05191e7628405a19cf491ccd/websockets-16.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df57afc692e517a85e65b72e165356ed1df12386ecb879ad5693be08fac65dde", size = 185054 }, + { url = "https://files.pythonhosted.org/packages/b0/ba/6500a0efc94f7373ee8fefa8c271acdfd4dca8bd49a90d4be7ccabfc397e/websockets-16.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2b9f1e0d69bc60a4a87349d50c09a037a2607918746f07de04df9e43252c77a3", size = 184565 }, + { url = "https://files.pythonhosted.org/packages/04/b4/96bf2cee7c8d8102389374a2616200574f5f01128d1082f44102140344cc/websockets-16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:335c23addf3d5e6a8633f9f8eda77efad001671e80b95c491dd0924587ece0b3", size = 183848 }, + { url = "https://files.pythonhosted.org/packages/02/8e/81f40fb00fd125357814e8c3025738fc4ffc3da4b6b4a4472a82ba304b41/websockets-16.0-cp310-cp310-win32.whl", hash = "sha256:37b31c1623c6605e4c00d466c9d633f9b812ea430c11c8a278774a1fde1acfa9", size = 178249 }, + { url = "https://files.pythonhosted.org/packages/b4/5f/7e40efe8df57db9b91c88a43690ac66f7b7aa73a11aa6a66b927e44f26fa/websockets-16.0-cp310-cp310-win_amd64.whl", hash = "sha256:8e1dab317b6e77424356e11e99a432b7cb2f3ec8c5ab4dabbcee6add48f72b35", size = 178685 }, + { url = "https://files.pythonhosted.org/packages/f2/db/de907251b4ff46ae804ad0409809504153b3f30984daf82a1d84a9875830/websockets-16.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:31a52addea25187bde0797a97d6fc3d2f92b6f72a9370792d65a6e84615ac8a8", size = 177340 }, + { url = "https://files.pythonhosted.org/packages/f3/fa/abe89019d8d8815c8781e90d697dec52523fb8ebe308bf11664e8de1877e/websockets-16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:417b28978cdccab24f46400586d128366313e8a96312e4b9362a4af504f3bbad", size = 175022 }, + { url = 
"https://files.pythonhosted.org/packages/58/5d/88ea17ed1ded2079358b40d31d48abe90a73c9e5819dbcde1606e991e2ad/websockets-16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af80d74d4edfa3cb9ed973a0a5ba2b2a549371f8a741e0800cb07becdd20f23d", size = 175319 }, + { url = "https://files.pythonhosted.org/packages/d2/ae/0ee92b33087a33632f37a635e11e1d99d429d3d323329675a6022312aac2/websockets-16.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:08d7af67b64d29823fed316505a89b86705f2b7981c07848fb5e3ea3020c1abe", size = 184631 }, + { url = "https://files.pythonhosted.org/packages/c8/c5/27178df583b6c5b31b29f526ba2da5e2f864ecc79c99dae630a85d68c304/websockets-16.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7be95cfb0a4dae143eaed2bcba8ac23f4892d8971311f1b06f3c6b78952ee70b", size = 185870 }, + { url = "https://files.pythonhosted.org/packages/87/05/536652aa84ddc1c018dbb7e2c4cbcd0db884580bf8e95aece7593fde526f/websockets-16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d6297ce39ce5c2e6feb13c1a996a2ded3b6832155fcfc920265c76f24c7cceb5", size = 185361 }, + { url = "https://files.pythonhosted.org/packages/6d/e2/d5332c90da12b1e01f06fb1b85c50cfc489783076547415bf9f0a659ec19/websockets-16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c1b30e4f497b0b354057f3467f56244c603a79c0d1dafce1d16c283c25f6e64", size = 184615 }, + { url = "https://files.pythonhosted.org/packages/77/fb/d3f9576691cae9253b51555f841bc6600bf0a983a461c79500ace5a5b364/websockets-16.0-cp311-cp311-win32.whl", hash = "sha256:5f451484aeb5cafee1ccf789b1b66f535409d038c56966d6101740c1614b86c6", size = 178246 }, + { url = "https://files.pythonhosted.org/packages/54/67/eaff76b3dbaf18dcddabc3b8c1dba50b483761cccff67793897945b37408/websockets-16.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d7f0659570eefb578dacde98e24fb60af35350193e4f56e11190787bee77dac", size = 178684 }, + { url = 
"https://files.pythonhosted.org/packages/84/7b/bac442e6b96c9d25092695578dda82403c77936104b5682307bd4deb1ad4/websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00", size = 177365 }, + { url = "https://files.pythonhosted.org/packages/b0/fe/136ccece61bd690d9c1f715baaeefd953bb2360134de73519d5df19d29ca/websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79", size = 175038 }, + { url = "https://files.pythonhosted.org/packages/40/1e/9771421ac2286eaab95b8575b0cb701ae3663abf8b5e1f64f1fd90d0a673/websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39", size = 175328 }, + { url = "https://files.pythonhosted.org/packages/18/29/71729b4671f21e1eaa5d6573031ab810ad2936c8175f03f97f3ff164c802/websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c", size = 184915 }, + { url = "https://files.pythonhosted.org/packages/97/bb/21c36b7dbbafc85d2d480cd65df02a1dc93bf76d97147605a8e27ff9409d/websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f", size = 186152 }, + { url = "https://files.pythonhosted.org/packages/4a/34/9bf8df0c0cf88fa7bfe36678dc7b02970c9a7d5e065a3099292db87b1be2/websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1", size = 185583 }, + { url = "https://files.pythonhosted.org/packages/47/88/4dd516068e1a3d6ab3c7c183288404cd424a9a02d585efbac226cb61ff2d/websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2", size = 184880 }, + { url = 
"https://files.pythonhosted.org/packages/91/d6/7d4553ad4bf1c0421e1ebd4b18de5d9098383b5caa1d937b63df8d04b565/websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89", size = 178261 }, + { url = "https://files.pythonhosted.org/packages/c3/f0/f3a17365441ed1c27f850a80b2bc680a0fa9505d733fe152fdf5e98c1c0b/websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea", size = 178693 }, + { url = "https://files.pythonhosted.org/packages/cc/9c/baa8456050d1c1b08dd0ec7346026668cbc6f145ab4e314d707bb845bf0d/websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9", size = 177364 }, + { url = "https://files.pythonhosted.org/packages/7e/0c/8811fc53e9bcff68fe7de2bcbe75116a8d959ac699a3200f4847a8925210/websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230", size = 175039 }, + { url = "https://files.pythonhosted.org/packages/aa/82/39a5f910cb99ec0b59e482971238c845af9220d3ab9fa76dd9162cda9d62/websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c", size = 175323 }, + { url = "https://files.pythonhosted.org/packages/bd/28/0a25ee5342eb5d5f297d992a77e56892ecb65e7854c7898fb7d35e9b33bd/websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5", size = 184975 }, + { url = "https://files.pythonhosted.org/packages/f9/66/27ea52741752f5107c2e41fda05e8395a682a1e11c4e592a809a90c6a506/websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82", size = 186203 }, + { url = 
"https://files.pythonhosted.org/packages/37/e5/8e32857371406a757816a2b471939d51c463509be73fa538216ea52b792a/websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8", size = 185653 }, + { url = "https://files.pythonhosted.org/packages/9b/67/f926bac29882894669368dc73f4da900fcdf47955d0a0185d60103df5737/websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f", size = 184920 }, + { url = "https://files.pythonhosted.org/packages/3c/a1/3d6ccdcd125b0a42a311bcd15a7f705d688f73b2a22d8cf1c0875d35d34a/websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a", size = 178255 }, + { url = "https://files.pythonhosted.org/packages/6b/ae/90366304d7c2ce80f9b826096a9e9048b4bb760e44d3b873bb272cba696b/websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156", size = 178689 }, + { url = "https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406 }, + { url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085 }, + { url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328 }, + { url = 
"https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044 }, + { url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279 }, + { url = "https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711 }, + { url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982 }, + { url = "https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915 }, + { url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381 }, + { url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737 }, + { url = 
"https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268 }, + { url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486 }, + { url = "https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331 }, + { url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501 }, + { url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062 }, + { url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356 }, + { url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", size = 178085 }, + { url = 
"https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531 }, + { url = "https://files.pythonhosted.org/packages/72/07/c98a68571dcf256e74f1f816b8cc5eae6eb2d3d5cfa44d37f801619d9166/websockets-16.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:349f83cd6c9a415428ee1005cadb5c2c56f4389bc06a9af16103c3bc3dcc8b7d", size = 174947 }, + { url = "https://files.pythonhosted.org/packages/7e/52/93e166a81e0305b33fe416338be92ae863563fe7bce446b0f687b9df5aea/websockets-16.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:4a1aba3340a8dca8db6eb5a7986157f52eb9e436b74813764241981ca4888f03", size = 175260 }, + { url = "https://files.pythonhosted.org/packages/56/0c/2dbf513bafd24889d33de2ff0368190a0e69f37bcfa19009ef819fe4d507/websockets-16.0-pp311-pypy311_pp73-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f4a32d1bd841d4bcbffdcb3d2ce50c09c3909fbead375ab28d0181af89fd04da", size = 176071 }, + { url = "https://files.pythonhosted.org/packages/a5/8f/aea9c71cc92bf9b6cc0f7f70df8f0b420636b6c96ef4feee1e16f80f75dd/websockets-16.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0298d07ee155e2e9fda5be8a9042200dd2e3bb0b8a38482156576f863a9d457c", size = 176968 }, + { url = "https://files.pythonhosted.org/packages/9a/3f/f70e03f40ffc9a30d817eef7da1be72ee4956ba8d7255c399a01b135902a/websockets-16.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a653aea902e0324b52f1613332ddf50b00c06fdaf7e92624fbf8c77c78fa5767", size = 178735 }, + { url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598 }, +] + +[[package]] +name = 
"zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276 }, +]