|
6 | 6 | from mcp import Client |
7 | 7 | from mcp.server import Server, ServerRequestContext |
8 | 8 | from mcp.shared.exceptions import MCPError |
| 9 | +from mcp.shared.message import SessionMessage |
9 | 10 | from mcp.types import ( |
| 11 | + LATEST_PROTOCOL_VERSION, |
10 | 12 | CallToolRequest, |
11 | 13 | CallToolRequestParams, |
12 | 14 | CallToolResult, |
13 | 15 | CancelledNotification, |
14 | 16 | CancelledNotificationParams, |
| 17 | + ClientCapabilities, |
| 18 | + Implementation, |
| 19 | + InitializeRequestParams, |
| 20 | + JSONRPCNotification, |
| 21 | + JSONRPCRequest, |
15 | 22 | ListToolsResult, |
16 | 23 | PaginatedRequestParams, |
17 | 24 | TextContent, |
@@ -90,3 +97,154 @@ async def first_request(): |
90 | 97 | assert isinstance(content, TextContent) |
91 | 98 | assert content.text == "Call number: 2" |
92 | 99 | assert call_count == 2 |
| 100 | + |
| 101 | + |
@pytest.mark.anyio
async def test_server_cancels_in_flight_handlers_on_transport_close():
    """server.run() must cancel in-flight handlers when the transport closes.

    If server.run() joins on the handler instead of cancelling it, the task
    group waits for the handler, which then tries to respond through a write
    stream that _receive_loop already closed, raising ClosedResourceError and
    crashing server.run() with exit code 1.

    server.run() is driven with raw memory streams here on purpose:
    InMemoryTransport wraps it in its own finally-cancel (_memory.py), which
    would mask the bug.
    """
    started = anyio.Event()
    cancelled = anyio.Event()
    run_finished = anyio.Event()

    async def slow_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult:
        started.set()
        try:
            await anyio.sleep_forever()
        finally:
            cancelled.set()
        # Only reachable if sleep_forever returned normally — it never does,
        # so this line exists purely to fail loudly if that ever changes.
        raise AssertionError  # pragma: no cover

    server = Server("test", on_call_tool=slow_tool)

    # client -> server and server -> client halves of the transport.
    client_send, server_recv = anyio.create_memory_object_stream[SessionMessage | Exception](10)
    server_send, client_recv = anyio.create_memory_object_stream[SessionMessage](10)

    async def drive_server():
        await server.run(server_recv, server_send, server.create_initialization_options())
        run_finished.set()

    def make_request(request_id, method, params):
        # Small local helper: wrap a JSON-RPC request in a SessionMessage.
        return SessionMessage(JSONRPCRequest(jsonrpc="2.0", id=request_id, method=method, params=params))

    init_params = InitializeRequestParams(
        protocol_version=LATEST_PROTOCOL_VERSION,
        capabilities=ClientCapabilities(),
        client_info=Implementation(name="test", version="1.0"),
    ).model_dump(by_alias=True, mode="json", exclude_none=True)
    initialized_note = JSONRPCNotification(jsonrpc="2.0", method="notifications/initialized")
    call_params = CallToolRequestParams(name="slow", arguments={}).model_dump(by_alias=True, mode="json")

    with anyio.fail_after(5):
        async with anyio.create_task_group() as tg, client_send, server_recv, server_send, client_recv:
            tg.start_soon(drive_server)

            # Standard MCP handshake, then a tool call that parks forever.
            await client_send.send(make_request(1, "initialize", init_params))
            await client_recv.receive()  # init response
            await client_send.send(SessionMessage(initialized_note))
            await client_send.send(make_request(2, "tools/call", call_params))

            await started.wait()

            # Close the server's input stream — this is what stdin EOF does.
            # server.run()'s incoming_messages loop ends, finally-cancel fires,
            # handler gets CancelledError, server.run() returns.
            await client_send.aclose()

            await run_finished.wait()

            assert cancelled.is_set()
| 173 | + |
| 174 | + |
@pytest.mark.anyio
async def test_server_handles_transport_close_with_pending_server_to_client_requests():
    """When the transport closes while handlers are blocked on server→client
    requests (sampling, roots, elicitation), server.run() must still exit cleanly.

    Two bugs covered:
    1. _receive_loop's finally iterates _response_streams with await checkpoints
       inside; the woken handler's send_request finally pops from that dict
       before the next __next__() — RuntimeError: dictionary changed size.
    2. The woken handler's MCPError is caught in _handle_request, which falls
       through to respond() against a write stream _receive_loop already closed.
    """
    handlers_started = 0
    both_started = anyio.Event()
    server_run_returned = anyio.Event()

    async def handle_call_tool(ctx: ServerRequestContext, params: CallToolRequestParams) -> CallToolResult:
        # Counts entries so the test can wait until BOTH handlers are running
        # (two entries in _response_streams is what bug 1 needs).
        nonlocal handlers_started
        handlers_started += 1
        if handlers_started == 2:
            both_started.set()
        # Blocks on send_request waiting for a client response that never comes.
        # _receive_loop's finally will wake this with CONNECTION_CLOSED.
        await ctx.session.list_roots()
        raise AssertionError  # pragma: no cover

    server = Server("test", on_call_tool=handle_call_tool)

    # Raw memory streams stand in for the transport: to_server/server_read is
    # the client→server half, server_write/from_server the server→client half.
    to_server, server_read = anyio.create_memory_object_stream[SessionMessage | Exception](10)
    server_write, from_server = anyio.create_memory_object_stream[SessionMessage](10)

    async def run_server() -> None:
        # Runs server.run() to completion; setting the event (rather than the
        # task simply ending) lets the test assert a *clean* return.
        await server.run(server_read, server_write, server.create_initialization_options())
        server_run_returned.set()

    init_req = JSONRPCRequest(
        jsonrpc="2.0",
        id=1,
        method="initialize",
        params=InitializeRequestParams(
            protocol_version=LATEST_PROTOCOL_VERSION,
            capabilities=ClientCapabilities(),
            client_info=Implementation(name="test", version="1.0"),
        ).model_dump(by_alias=True, mode="json", exclude_none=True),
    )
    initialized = JSONRPCNotification(jsonrpc="2.0", method="notifications/initialized")

    # Whole sequence is bounded so a regression hangs the test visibly
    # (TimeoutError) instead of deadlocking the suite.
    with anyio.fail_after(5):
        async with anyio.create_task_group() as tg, to_server, server_read, server_write, from_server:
            tg.start_soon(run_server)

            await to_server.send(SessionMessage(init_req))
            await from_server.receive()  # init response
            await to_server.send(SessionMessage(initialized))

            # Two tool calls → two handlers → two _response_streams entries.
            for rid in (2, 3):
                call_req = JSONRPCRequest(
                    jsonrpc="2.0",
                    id=rid,
                    method="tools/call",
                    params=CallToolRequestParams(name="t", arguments={}).model_dump(by_alias=True, mode="json"),
                )
                await to_server.send(SessionMessage(call_req))

            await both_started.wait()
            # Drain the two roots/list requests so send_request's _write_stream.send()
            # completes and both handlers are parked at response_stream_reader.receive().
            await from_server.receive()
            await from_server.receive()

            await to_server.aclose()

            # Without the fixes: RuntimeError (dict mutation) or ClosedResourceError
            # (respond after write-stream close) escapes run_server and this hangs.
            await server_run_returned.wait()
0 commit comments