Mirror of https://github.com/python/cpython.git (synced 2025-10-09 16:34:44 +00:00)
bpo-41002: Optimize HTTPResponse.read with a given amount (GH-20943)
I've done the implementation for both non-chunked and chunked reads. I haven't benchmarked chunked reads because I don't currently have a convenient way to generate a high-bandwidth chunked stream, but I don't see any reason it shouldn't enjoy the same benefits as the non-chunked case. I've used the benchmark attached to the bpo bug to verify that performance now matches the unsized read case.

Automerge-Triggered-By: @methane
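For context, here is a minimal sketch of the idea behind the sized-read fast path for the non-chunked case, as described in the bpo issue: clip the requested amount to the remaining Content-Length and read directly from the buffered socket file, rather than round-tripping through a temporary bytearray and readinto(). The helper name sized_read and its signature are illustrative assumptions, not the actual http.client code.

# A minimal sketch (not the actual CPython implementation) of the sized-read
# idea: clip the requested amount to the remaining Content-Length and read
# straight from the buffered file object.
import io

def sized_read(fp, amt, remaining):
    """Read up to amt bytes from the buffered file fp.

    remaining is the number of body bytes left according to Content-Length,
    or None when the length is unknown. Returns (data, new_remaining).
    """
    if remaining is not None and amt > remaining:
        # Clip the read to the end of the response body.
        amt = remaining
    data = fp.read(amt)
    if remaining is not None:
        remaining -= len(data)
    return data, remaining

# Mirrors what the new tests below assert: with Content-Length: 4,
# a read(10) returns only the 4-byte body.
data, left = sized_read(io.BytesIO(b"Text"), 10, 4)
assert data == b"Text" and left == 0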
parent cf18c9e9d4 · commit 152f0b8bee
3 changed files with 53 additions and 10 deletions
@@ -569,6 +569,33 @@ class BasicTest(TestCase):
         resp.close()
         self.assertTrue(resp.closed)
 
+    def test_partial_reads_past_end(self):
+        # if we have Content-Length, clip reads to the end
+        body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
+        sock = FakeSocket(body)
+        resp = client.HTTPResponse(sock)
+        resp.begin()
+        self.assertEqual(resp.read(10), b'Text')
+        self.assertTrue(resp.isclosed())
+        self.assertFalse(resp.closed)
+        resp.close()
+        self.assertTrue(resp.closed)
+
+    def test_partial_readintos_past_end(self):
+        # if we have Content-Length, clip readintos to the end
+        body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
+        sock = FakeSocket(body)
+        resp = client.HTTPResponse(sock)
+        resp.begin()
+        b = bytearray(10)
+        n = resp.readinto(b)
+        self.assertEqual(n, 4)
+        self.assertEqual(bytes(b)[:4], b'Text')
+        self.assertTrue(resp.isclosed())
+        self.assertFalse(resp.closed)
+        resp.close()
+        self.assertTrue(resp.closed)
+
     def test_partial_reads_no_content_length(self):
         # when no length is present, the socket should be gracefully closed when
         # all data was read
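The commit message notes that chunked reads weren't benchmarked for lack of a convenient high-bandwidth chunked stream. As an illustrative sketch (none of this comes from the commit), one way to generate such a stream locally is to serve a chunked HTTP/1.1 response from a background thread over a plain socket and time sized reads against it; the chunk size, body size, and per-call read size below are arbitrary choices.

# Hedged sketch: produce a local Transfer-Encoding: chunked response and
# time http.client sized reads against it.
import http.client
import socket
import threading
import time

CHUNK = b"x" * 65536      # 64 KiB per chunk (arbitrary)
NUM_CHUNKS = 512          # ~32 MiB of body data (arbitrary)

def serve_chunked(server_sock):
    conn, _ = server_sock.accept()
    conn.recv(65536)  # read (and ignore) the request
    conn.sendall(b"HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n")
    size_line = b"%x\r\n" % len(CHUNK)
    for _ in range(NUM_CHUNKS):
        conn.sendall(size_line + CHUNK + b"\r\n")
    conn.sendall(b"0\r\n\r\n")  # zero-length chunk terminates the body
    conn.close()

server = socket.create_server(("127.0.0.1", 0))
port = server.getsockname()[1]
threading.Thread(target=serve_chunked, args=(server,), daemon=True).start()

client_conn = http.client.HTTPConnection("127.0.0.1", port)
client_conn.request("GET", "/")
resp = client_conn.getresponse()

start = time.perf_counter()
total = 0
while True:
    data = resp.read(1024 * 1024)  # sized reads exercise the optimized path
    if not data:
        break
    total += len(data)
elapsed = time.perf_counter() - start
print(f"read {total} bytes in {elapsed:.3f}s")
client_conn.close()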