from __future__ import absolute_import

import io
import logging
import zlib
from contextlib import contextmanager
from socket import error as SocketError
from socket import timeout as SocketTimeout

try:
    import brotli
except ImportError:
    brotli = None

from ._collections import HTTPHeaderDict
from .connection import BaseSSLError, HTTPException
from .exceptions import (
    BodyNotHttplibCompatible,
    ProtocolError,
    DecodeError,
    ReadTimeoutError,
    ResponseNotChunked,
    IncompleteRead,
    InvalidHeader,
    HTTPError,
)
from .packages.six import PY3
from .packages.six import string_types as basestring
from .packages.six.moves import http_client as httplib
from .util.response import is_fp_closed, is_response_to_head

log = logging.getLogger(__name__)

class DeflateDecoder(object):
    def __init__(self):
        self._first_try = True
        self._data = b""
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        return getattr(self._obj, name)

    def decompress(self, data):
        if not data:
            return data

        if not self._first_try:
            return self._obj.decompress(data)

        # Buffer the input until we know which deflate variant this is.
        self._data += data
        try:
            decompressed = self._obj.decompress(data)
            if decompressed:
                self._first_try = False
                self._data = None
            return decompressed
        except zlib.error:
            # The stream is raw deflate (no zlib container); retry with a
            # negative wbits value and replay the buffered data.
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
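
# Illustrative check of the two deflate variants the decoder above handles
# (a sketch, not part of the library):
#
#     c = zlib.compressobj()                                      # zlib-wrapped
#     DeflateDecoder().decompress(c.compress(b"hi") + c.flush())  # -> b"hi"
#
#     c = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)     # raw deflate
#     DeflateDecoder().decompress(c.compress(b"hi") + c.flush())  # -> b"hi"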

class GzipDecoderState(object):

    FIRST_MEMBER = 0
    OTHER_MEMBERS = 1
    SWALLOW_DATA = 2


class GzipDecoder(object):
    def __init__(self):
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
        self._state = GzipDecoderState.FIRST_MEMBER

    def __getattr__(self, name):
        return getattr(self._obj, name)

    def decompress(self, data):
        ret = bytearray()
        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
            return bytes(ret)
        while True:
            try:
                ret += self._obj.decompress(data)
            except zlib.error:
                previous_state = self._state
                # Ignore data after the first error
                self._state = GzipDecoderState.SWALLOW_DATA
                if previous_state == GzipDecoderState.OTHER_MEMBERS:
                    # Allow trailing garbage acceptable in other gzip clients
                    return bytes(ret)
                raise
            data = self._obj.unused_data
            if not data:
                return bytes(ret)
            # Another gzip member follows; start a fresh decompressor.
            self._state = GzipDecoderState.OTHER_MEMBERS
            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
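
# Illustrative multi-member case (a sketch, not part of the library): some
# servers concatenate gzip members; once the first member is exhausted,
# ``unused_data`` holds the next one and a fresh decompressobj continues:
#
#     import gzip
#     payload = gzip.compress(b"a") + gzip.compress(b"b")
#     GzipDecoder().decompress(payload)  # -> b"ab"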

class BrotliDecoder(object):
    # Supports both 'brotlipy' and 'Brotli' packages
    # since they share an import name. The top branches
    # are for 'brotlipy' and bottom branches for 'Brotli'.
    def __init__(self):
        self._obj = brotli.Decompressor()
        if hasattr(self._obj, "decompress"):
            self.decompress = self._obj.decompress
        else:
            self.decompress = self._obj.process

    def flush(self):
        if hasattr(self._obj, "flush"):
            return self._obj.flush()
        return b""
""" From RFC7231: If one or more encodings have been applied to a representation, the sender that applied the encodings MUST generate a Content-Encoding header field that lists the content codings in the order in which they were applied. """
self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
return self._decoders[0].flush()
for d in reversed(self._decoders): data = d.decompress(data) return data
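
# Illustrative example (not part of the library): for
# "Content-Encoding: gzip, br" the body was gzipped first and then
# brotli-compressed, so MultiDecoder must undo brotli first, then gzip:
#
#     m = MultiDecoder("gzip, br")
#     # m.decompress(data) runs the brotli decoder, then the gzip decoder.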
if "," in mode: return MultiDecoder(mode)
if mode == "gzip": return GzipDecoder()
if brotli is not None and mode == "br": return BrotliDecoder()
return DeflateDecoder()
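
# Illustrative mapping (not part of the library):
#
#     _get_decoder("gzip")      # -> GzipDecoder
#     _get_decoder("br")        # -> BrotliDecoder (only if brotli is present)
#     _get_decoder("deflate")   # -> DeflateDecoder (also the fallback)
#     _get_decoder("gzip, br")  # -> MultiDecoder wrapping both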
""" HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is loaded and decoded on-demand when the ``data`` property is accessed. This class is also compatible with the Python standard library's :mod:`io` module, and can hence be treated as a readable object in the context of that framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content: If True, the response's body will be preloaded during construction.
:param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header.
:param original_response: When this HTTPResponse wrapper is generated from an httplib.HTTPResponse object, it's convenient to include the original for debug purposes. It's otherwise unused.
:param retries: The retries contains the last :class:`~urllib3.util.retry.Retry` that was used during the request.
:param enforce_content_length: Enforce content length checking. Body returned by server must match value of Content-Length header, if present. Otherwise, raise error. """
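    # Example usage (an illustrative sketch; assumes an HTTPConnectionPool
    # named `pool`):
    #
    #     r = pool.urlopen("GET", "/", preload_content=False)
    #     r.headers.get("content-encoding")  # e.g. "gzip"
    #     r.data                             # decoded body, read on demand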
CONTENT_DECODERS += ["br"]
self, body="", headers=None, status=0, version=0, reason=None, strict=0, preload_content=True, decode_content=True, original_response=None, pool=None, connection=None, msg=None, retries=None, enforce_content_length=False, request_method=None, request_url=None, auto_close=True, ):
        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
        self.retries = retries
        self.enforce_content_length = enforce_content_length
        self.auto_close = auto_close

        self._decoder = None
        self._body = None
        self._fp = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        self.msg = msg
        self._request_url = request_url

        if body and isinstance(body, (basestring, bytes)):
            self._body = body

        self._pool = pool
        self._connection = connection

        if hasattr(body, "read"):
            self._fp = body
        # Are we using the chunked-style of transfer encoding?
        self.chunked = False
        self.chunk_left = None
        tr_enc = self.headers.get("transfer-encoding", "").lower()
        # Don't incur the penalty of creating a list and then discarding it
        encodings = (enc.strip() for enc in tr_enc.split(","))
        if "chunked" in encodings:
            self.chunked = True
        # Determine length of response
        self.length_remaining = self._init_length(request_method)
        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
""" Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code. """ if self.status in self.REDIRECT_STATUSES: return self.headers.get("location")
return False
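    # The three possible return values (illustrative):
    #
    #     HTTPResponse(status=301, headers={"Location": "/new"})
    #         .get_redirect_location()                      # -> "/new"
    #     HTTPResponse(status=301).get_redirect_location()  # -> None
    #     HTTPResponse(status=200).get_redirect_location()  # -> False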
""" Read and discard any remaining HTTP response data in the response connection.
Unread data in the HTTPResponse connection blocks the connection from being released back to the pool. """ try: self.read() except (HTTPError, SocketError, BaseSSLError, HTTPException): pass

    @property
    def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
        if self._body:
            return self._body

        if self._fp:
            return self.read(cache_content=True)

    @property
    def connection(self):
        return self._connection

    def isclosed(self):
        return is_fp_closed(self._fp)
""" Obtain the number of bytes pulled over the wire so far. May differ from the amount of content returned by :meth:``HTTPResponse.read`` if bytes are encoded on the wire (e.g, compressed). """ return self._fp_bytes_read
""" Set initial length value for Response content if available. """
# This Response will fail with an IncompleteRead if it can't be # received as chunked. This method falls back to attempt reading # the response before raising an exception. log.warning( "Received response with both Content-Length and " "Transfer-Encoding set. This is expressly forbidden " "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " "attempting to process response as Transfer-Encoding: " "chunked." ) return None
# RFC 7230 section 3.3.2 specifies multiple content lengths can # be sent in a single Content-Length header # (e.g. Content-Length: 42, 42). This line ensures the values # are all valid ints and that as long as the `set` length is 1, # all values are the same. Otherwise, the header is invalid. raise InvalidHeader( "Content-Length contained multiple " "unmatching values (%s)" % length ) except ValueError: length = None else: length = None
        # Convert status to int for comparison
        # In some cases, httplib returns a status of "_UNKNOWN"
        try:
            status = int(self.status)
        except ValueError:
            status = 0
        # Check for responses that shouldn't include a body
        if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
            length = 0

        return length
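    # Illustrative Content-Length cases for _init_length (not part of the
    # library):
    #
    #     "Content-Length: 42, 42"  -> lengths == {42}, length == 42
    #     "Content-Length: 42, 64"  -> InvalidHeader is raised
    #     "Content-Length: abc"     -> ValueError    -> length = None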
""" Set-up the _decoder attribute if necessary. """ # Note: content-encoding value should be case-insensitive, per RFC 7230 # Section 3.2 self._decoder = _get_decoder(content_encoding) encodings = [ e.strip() for e in content_encoding.split(",") if e.strip() in self.CONTENT_DECODERS ] if len(encodings): self._decoder = _get_decoder(content_encoding)
    DECODER_ERROR_CLASSES = (IOError, zlib.error)
    if brotli is not None:
        DECODER_ERROR_CLASSES += (brotli.error,)
""" Decode the data passed in and potentially flush the decoder. """ return data
data = self._decoder.decompress(data) except self.DECODER_ERROR_CLASSES as e: content_encoding = self.headers.get("content-encoding", "").lower() raise DecodeError( "Received response with content-encoding: %s, but " "failed to decode it." % content_encoding, e, ) data += self._flush_decoder()
""" Flushes the decoder. Should only be called if the decoder is actually being used. """ buf = self._decoder.decompress(b"") return buf + self._decoder.flush()

    @contextmanager
    def _error_catcher(self):
        """
        Catch low-level Python exceptions, instead re-raising urllib3
        variants, so that low-level exceptions are not leaked in the
        high-level API.

        On exit, release the connection back to the pool.
        """
        clean_exit = False

        try:
            try:
                yield
            except SocketTimeout:
                # FIXME: Ideally we'd like to include the url in the
                # ReadTimeoutError but there is yet no clean way to get at it
                # from this context.
                raise ReadTimeoutError(self._pool, None, "Read timed out.")
            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between
                # SSLErrors?
                if "read operation timed out" not in str(e):
                    # Defensive: this shouldn't happen, but just in case we're
                    # missing an edge case, let's avoid swallowing SSL errors.
                    raise

                raise ReadTimeoutError(self._pool, None, "Read timed out.")
            except (HTTPException, SocketError) as e:
                # This includes IncompleteRead.
                raise ProtocolError("Connection broken: %r" % e, e)
            # If no exception is thrown, we should avoid cleaning up
            # unnecessarily.
            clean_exit = True
        finally:
            # If we didn't terminate cleanly, we need to throw away our
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore so close it now to ensure that the connection is
                # released back to the pool.
                if self._original_response:
                    self._original_response.close()

                # Closing the response may not actually be sufficient to
                # close everything, so if we have a hold of the connection
                # close that too.
                if self._connection:
                    self._connection.close()

            # If we hold the original response but it's closed now, we should
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
""" Similar to :meth:`httplib.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``.
:param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response.
:param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header.
:param cache_content: If True, will save the returned data such that the same result is returned despite of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.) """ decode_content = self.decode_content
        if self._fp is None:
            return

        flush_decoder = False
        fp_closed = getattr(self._fp, "closed", False)
        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read() if not fp_closed else b""
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt) if not fp_closed else b""
                if (
                    amt != 0 and not data
                ):  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True
                    if self.enforce_content_length and self.length_remaining not in (
                        0,
                        None,
                    ):
                        # This is an edge case that httplib failed to cover
                        # due to concerns of backward compatibility. We're
                        # addressing it here to make sure IncompleteRead is
                        # raised during streaming, so all calls with incorrect
                        # Content-Length are caught.
                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
        if data:
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)

            data = self._decode(data, decode_content, flush_decoder)

            if cache_content:
                self._body = data

        return data
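    # Example usage (illustrative; assumes a response `r` created with
    # preload_content=False):
    #
    #     head = r.read(4)   # first four (possibly decoded) bytes
    #     rest = r.read()    # the remainder of the body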
""" A generator wrapper for the read() method. A call will block until ``amt`` bytes have been read from the connection or until the connection is closed.
:param amt: How much of the content to read. The generator will return up to much data per iteration, but may return less. This is particularly likely when using compressed data. However, the empty string will never be returned.
:param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ else:
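    # Example usage (illustrative; assumes a PoolManager named `http`):
    #
    #     r = http.request("GET", "http://example.com/", preload_content=False)
    #     for chunk in r.stream(1024):
    #         handle(chunk)  # `handle` is a placeholder for your own code
    #     r.release_conn()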

    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor,
        along with ``original_response=r``.
        """
        headers = r.msg

        if not isinstance(headers, HTTPHeaderDict):
            if PY3:
                headers = HTTPHeaderDict(headers.items())
            else:
                # Python 2.7
                headers = HTTPHeaderDict.from_httplib(headers)

        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, "strict", 0)
        resp = ResponseCls(
            body=r,
            headers=headers,
            status=r.status,
            version=r.version,
            reason=r.reason,
            strict=strict,
            original_response=r,
            **response_kw
        )
        return resp

    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers

    def getheader(self, name, default=None):
        return self.headers.get(name, default)

    # Backwards compatibility for http.cookiejar
    def info(self):
        return self.headers

    # Overrides from io.IOBase
    def close(self):
        if not self.closed:
            self._fp.close()

        if self._connection:
            self._connection.close()

        if not self.auto_close:
            io.IOBase.close(self)

    @property
    def closed(self):
        if not self.auto_close:
            return io.IOBase.closed.__get__(self)
        elif self._fp is None:
            return True
        elif hasattr(self._fp, "isclosed"):
            return self._fp.isclosed()
        elif hasattr(self._fp, "closed"):
            return self._fp.closed
        else:
            return True

    def fileno(self):
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError(
                "The file-like object this HTTPResponse is wrapped "
                "around has no file descriptor"
            )

    def flush(self):
        if (
            self._fp is not None
            and hasattr(self._fp, "flush")
            and not getattr(self._fp, "closed", False)
        ):
            return self._fp.flush()

    def readable(self):
        # This method is required for `io` module compatibility.
        return True

    def readinto(self, b):
        # This method is required for `io` module compatibility.
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[: len(temp)] = temp
            return len(temp)
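    # Because readable() and readinto() are implemented, an HTTPResponse can
    # be wrapped by the standard io machinery (illustrative; `r` is an open,
    # non-preloaded response):
    #
    #     reader = io.BufferedReader(r, buffer_size=8192)
    #     first_line = reader.readline()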
""" Checks if the underlying file-like object looks like a httplib.HTTPResponse object. We do this by testing for the fp attribute. If it is present we assume it returns raw chunks as processed by read_chunked(). """

    def _update_chunk_length(self):
        # First, we'll figure out length of a chunk and then
        # we'll try to read it from socket.
        if self.chunk_left is not None:
            return
        line = self._fp.fp.readline()
        line = line.split(b";", 1)[0]
        try:
            self.chunk_left = int(line, 16)
        except ValueError:
            # Invalid chunked protocol response, abort.
            self.close()
            raise httplib.IncompleteRead(line)

    def _handle_chunk(self, amt):
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)
            returned_chunk = chunk
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif amt < self.chunk_left:
            value = self._fp._safe_read(amt)
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk
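    # Illustrative wire framing (not part of the library): one chunk looks
    # like
    #
    #     b"5\r\nhello\r\n"
    #
    # _update_chunk_length() parses the hex size ("5"); _handle_chunk() then
    # reads the five payload bytes and discards the trailing CRLF.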
""" Similar to :meth:`HTTPResponse.read`, but with an additional parameter: ``decode_content``.
:param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response.
:param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ # FIXME: Rewrite this method and make it a class with a better structured logic. raise ResponseNotChunked( "Response is not chunked. " "Header 'transfer-encoding: chunked' is missing." ) raise BodyNotHttplibCompatible( "Body should be httplib.HTTPResponse like. " "It should have have an fp attribute which returns raw chunks." )
        with self._error_catcher():
            # Don't bother reading the body of a HEAD request.
            if self._original_response and is_response_to_head(
                self._original_response
            ):
                self._original_response.close()
                return

            # If a response is already read and closed
            # then return immediately.
            if self._fp.fp is None:
                return
            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(
                    chunk, decode_content=decode_content, flush_decoder=False
                )
                if decoded:
                    yield decoded

            if decode_content:
                # On CPython and PyPy, we should never need to flush the
                # decoder. However, on Jython we *might* need to, so
                # let's defensively do it anyway.
                decoded = self._flush_decoder()
                if decoded:  # Platform-specific: Jython.
                    yield decoded
            # Chunk content ends with \r\n: discard it.
            while True:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b"\r\n":
                    break
            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()
""" Returns the URL that was the source of this response. If the request that generated this response redirected, this method will return the final redirect location. """ if self.retries is not None and len(self.retries.history): return self.retries.history[-1].redirect_location else: return self._request_url

    def __iter__(self):
        buffer = []
        for chunk in self.stream(decode_content=True):
            if b"\n" in chunk:
                chunk = chunk.split(b"\n")
                # Emit buffered data plus each complete line in this chunk.
                yield b"".join(buffer) + chunk[0] + b"\n"
                for x in chunk[1:-1]:
                    yield x + b"\n"
                if chunk[-1]:
                    buffer = [chunk[-1]]
                else:
                    buffer = []
            else:
                buffer.append(chunk)
        if buffer:
            yield b"".join(buffer)
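    # Example usage (illustrative): iterating yields newline-delimited pieces
    # of the decoded body:
    #
    #     for line in r:
    #         process(line)  # `process` is a placeholder; each `line` is
    #                        # bytes ending in b"\n", except possibly the last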