diff --git a/httpx/_content.py b/httpx/_content.py
index 6f479a0885..f9d2c2e872 100644
--- a/httpx/_content.py
+++ b/httpx/_content.py
@@ -59,9 +59,14 @@ def __iter__(self) -> Iterator[bytes]:
                 yield chunk
                 chunk = self._stream.read(self.CHUNK_SIZE)
         else:
-            # Otherwise iterate.
+            # Otherwise iterate, splitting large chunks.
             for part in self._stream:
-                yield part
+                # Split large chunks into CHUNK_SIZE pieces.
+                offset = 0
+                while offset < len(part):
+                    chunk_size = min(self.CHUNK_SIZE, len(part) - offset)
+                    yield part[offset : offset + chunk_size]
+                    offset += chunk_size
 
 
 class AsyncIteratorByteStream(AsyncByteStream):
diff --git a/tests/test_content.py b/tests/test_content.py
index 9bfe983722..07b686c7cb 100644
--- a/tests/test_content.py
+++ b/tests/test_content.py
@@ -516,3 +516,27 @@ def test_allow_nan_false():
         ValueError, match="Out of range float values are not JSON compliant"
     ):
         httpx.Response(200, json=data_with_inf)
+
+
+def test_iterator_content_splits_large_chunks():
+    # A generator yielding a single large chunk (100 KB).
+    large_chunk = b"a" * 102_400  # 100 KB
+
+    def gen() -> typing.Iterator[bytes]:
+        yield large_chunk
+
+    # Pass the generator to Request (internally wrapped in IteratorByteStream).
+    request = httpx.Request("POST", "https://www.example.com/", content=gen())
+
+    # Cast to Iterable[bytes] to keep mypy happy.
+    sync_stream: typing.Iterable[bytes] = request.stream  # type: ignore
+
+    # Collect the chunks the stream yields.
+    chunks = list(sync_stream)
+
+    # Each chunk must be no larger than CHUNK_SIZE (64 KB).
+    for chunk in chunks:
+        assert len(chunk) <= 64 * 1024
+
+    # The reassembled content must match the original.
+    assert b"".join(chunks) == large_chunk
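
Note: as a reference, below is a minimal standalone sketch of the splitting behaviour this change introduces. The helper name, the range-based loop, and the 100 KB input are illustrative only, not part of the patch; 65_536 mirrors IteratorByteStream.CHUNK_SIZE in httpx/_content.py.

CHUNK_SIZE = 65_536  # mirrors IteratorByteStream.CHUNK_SIZE

def split_chunks(parts):
    # Re-yield each incoming chunk in pieces of at most CHUNK_SIZE bytes.
    for part in parts:
        for offset in range(0, len(part), CHUNK_SIZE):
            yield part[offset : offset + CHUNK_SIZE]

# A 100 KB chunk comes out as one 64 KB piece plus a 36 KB remainder.
pieces = list(split_chunks([b"a" * 102_400]))
assert [len(p) for p in pieces] == [65_536, 36_864]
assert b"".join(pieces) == b"a" * 102_400

The range-stepped loop is equivalent to the while-loop in the diff; either way each slice copies its piece, which is the cost of bounding the size of any single chunk handed to the transport.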