Mercurial > repos > guerler > springsuite
comparison planemo/lib/python3.7/site-packages/urllib3/connectionpool.py @ 1:56ad4e20f292 draft
"planemo upload commit 6eee67778febed82ddd413c3ca40b3183a3898f1"
| author | guerler |
|---|---|
| date | Fri, 31 Jul 2020 00:32:28 -0400 |
| parents | |
| children |
comparison
equal
deleted
inserted
replaced
| 0:d30785e31577 | 1:56ad4e20f292 |
|---|---|
| 1 from __future__ import absolute_import | |
| 2 import errno | |
| 3 import logging | |
| 4 import sys | |
| 5 import warnings | |
| 6 | |
| 7 from socket import error as SocketError, timeout as SocketTimeout | |
| 8 import socket | |
| 9 | |
| 10 | |
| 11 from .exceptions import ( | |
| 12 ClosedPoolError, | |
| 13 ProtocolError, | |
| 14 EmptyPoolError, | |
| 15 HeaderParsingError, | |
| 16 HostChangedError, | |
| 17 LocationValueError, | |
| 18 MaxRetryError, | |
| 19 ProxyError, | |
| 20 ReadTimeoutError, | |
| 21 SSLError, | |
| 22 TimeoutError, | |
| 23 InsecureRequestWarning, | |
| 24 NewConnectionError, | |
| 25 ) | |
| 26 from .packages.ssl_match_hostname import CertificateError | |
| 27 from .packages import six | |
| 28 from .packages.six.moves import queue | |
| 29 from .connection import ( | |
| 30 port_by_scheme, | |
| 31 DummyConnection, | |
| 32 HTTPConnection, | |
| 33 HTTPSConnection, | |
| 34 VerifiedHTTPSConnection, | |
| 35 HTTPException, | |
| 36 BaseSSLError, | |
| 37 ) | |
| 38 from .request import RequestMethods | |
| 39 from .response import HTTPResponse | |
| 40 | |
| 41 from .util.connection import is_connection_dropped | |
| 42 from .util.request import set_file_position | |
| 43 from .util.response import assert_header_parsing | |
| 44 from .util.retry import Retry | |
| 45 from .util.timeout import Timeout | |
| 46 from .util.url import ( | |
| 47 get_host, | |
| 48 parse_url, | |
| 49 Url, | |
| 50 _normalize_host as normalize_host, | |
| 51 _encode_target, | |
| 52 ) | |
| 53 from .util.queue import LifoQueue | |
| 54 | |
| 55 | |
# Python 2/3 compatible lazy range (Py2's builtin ``range`` builds a list).
xrange = six.moves.xrange

# Module-level logger, named after this module for hierarchical configuration.
log = logging.getLogger(__name__)

# Unique sentinel used to distinguish "argument not supplied" from ``None``,
# because ``None`` is itself a meaningful value for several parameters.
_Default = object()
| 62 | |
| 63 # Pool objects | |
| 64 class ConnectionPool(object): | |
| 65 """ | |
| 66 Base class for all connection pools, such as | |
| 67 :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. | |
| 68 | |
| 69 .. note:: | |
| 70 ConnectionPool.urlopen() does not normalize or percent-encode target URIs | |
| 71 which is useful if your target server doesn't support percent-encoded | |
| 72 target URIs. | |
| 73 """ | |
| 74 | |
| 75 scheme = None | |
| 76 QueueCls = LifoQueue | |
| 77 | |
| 78 def __init__(self, host, port=None): | |
| 79 if not host: | |
| 80 raise LocationValueError("No host specified.") | |
| 81 | |
| 82 self.host = _normalize_host(host, scheme=self.scheme) | |
| 83 self._proxy_host = host.lower() | |
| 84 self.port = port | |
| 85 | |
| 86 def __str__(self): | |
| 87 return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port) | |
| 88 | |
| 89 def __enter__(self): | |
| 90 return self | |
| 91 | |
| 92 def __exit__(self, exc_type, exc_val, exc_tb): | |
| 93 self.close() | |
| 94 # Return False to re-raise any potential exceptions | |
| 95 return False | |
| 96 | |
| 97 def close(self): | |
| 98 """ | |
| 99 Close all pooled connections and disable the pool. | |
| 100 """ | |
| 101 pass | |
| 102 | |
| 103 | |
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
# errno values meaning "a non-blocking read would block"; used to recognize
# read timeouts that surface as OS errors instead of socket.timeout.
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
| 107 | |
class HTTPConnectionPool(ConnectionPool, RequestMethods):
    """
    Thread-safe connection pool for one host.

    :param host:
        Host used for this HTTP Connection (e.g. "localhost"), passed into
        :class:`httplib.HTTPConnection`.

    :param port:
        Port used for this HTTP Connection (None is equivalent to 80), passed
        into :class:`httplib.HTTPConnection`.

    :param strict:
        Causes BadStatusLine to be raised if the status line can't be parsed
        as a valid HTTP/1.0 or 1.1 status line, passed into
        :class:`httplib.HTTPConnection`.

        .. note::
           Only works in Python 2. This parameter is ignored in Python 3.

    :param timeout:
        Socket timeout in seconds for each individual connection. This can
        be a float or integer, which sets the timeout for the HTTP request,
        or an instance of :class:`urllib3.util.Timeout` which gives you more
        fine-grained control over request timeouts. After the constructor has
        been parsed, this is always a `urllib3.util.Timeout` object.

    :param maxsize:
        Number of connections to save that can be reused. More than 1 is useful
        in multithreaded situations. If ``block`` is set to False, more
        connections will be created but they will not be saved once they've
        been used.

    :param block:
        If set to True, no more than ``maxsize`` connections will be used at
        a time. When no free connections are available, the call will block
        until a connection has been released. This is a useful side effect for
        particular multithreaded situations where one does not want to use more
        than maxsize connections per host to prevent flooding.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param retries:
        Retry configuration to use by default with requests in this pool.

    :param _proxy:
        Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.connectionpool.ProxyManager`"

    :param _proxy_headers:
        A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.connectionpool.ProxyManager`"

    :param \\**conn_kw:
        Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
        :class:`urllib3.connection.HTTPSConnection` instances.
    """

    scheme = "http"
    # Class used to create new connections; HTTPS pool overrides this.
    ConnectionCls = HTTPConnection
    # Class used to wrap raw httplib responses in urlopen().
    ResponseCls = HTTPResponse
    def __init__(
        self,
        host,
        port=None,
        strict=False,
        timeout=Timeout.DEFAULT_TIMEOUT,
        maxsize=1,
        block=False,
        headers=None,
        retries=None,
        _proxy=None,
        _proxy_headers=None,
        **conn_kw
    ):
        # Initialize both bases explicitly (multiple inheritance; each base
        # has its own constructor arguments).
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)

        self.strict = strict

        # Normalize raw int/float timeouts into a Timeout object so the rest
        # of the pool can rely on .connect_timeout / .read_timeout.
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)

        if retries is None:
            retries = Retry.DEFAULT

        self.timeout = timeout
        self.retries = retries

        self.pool = self.QueueCls(maxsize)
        self.block = block

        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}

        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)

        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw

        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault("socket_options", [])
| 220 | |
| 221 def _new_conn(self): | |
| 222 """ | |
| 223 Return a fresh :class:`HTTPConnection`. | |
| 224 """ | |
| 225 self.num_connections += 1 | |
| 226 log.debug( | |
| 227 "Starting new HTTP connection (%d): %s:%s", | |
| 228 self.num_connections, | |
| 229 self.host, | |
| 230 self.port or "80", | |
| 231 ) | |
| 232 | |
| 233 conn = self.ConnectionCls( | |
| 234 host=self.host, | |
| 235 port=self.port, | |
| 236 timeout=self.timeout.connect_timeout, | |
| 237 strict=self.strict, | |
| 238 **self.conn_kw | |
| 239 ) | |
| 240 return conn | |
| 241 | |
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")

        except queue.Empty:
            if self.block:
                raise EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more connections are allowed.",
                )
            pass  # Oh well, we'll create a new connection then

        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.debug("Resetting dropped connection: %s", self.host)
            conn.close()
            if getattr(conn, "auto_open", 1) == 0:
                # This is a proxied connection that has been mutated by
                # httplib._tunnel() and cannot be reused (since it would
                # attempt to bypass the proxy)
                conn = None

        # Fall back to a fresh connection when the pool yielded None (slot
        # placeholder) or the pooled connection was discarded above.
        return conn or self._new_conn()
| 280 | |
    def _put_conn(self, conn):
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return  # Everything is dandy, done.
        except AttributeError:
            # self.pool is None.
            pass
        except queue.Full:
            # This should never happen if self.block == True
            log.warning("Connection pool is full, discarding connection: %s", self.host)

        # Connection never got put back into the pool, close it.
        if conn:
            conn.close()
| 308 | |
| 309 def _validate_conn(self, conn): | |
| 310 """ | |
| 311 Called right before a request is made, after the socket is created. | |
| 312 """ | |
| 313 pass | |
| 314 | |
| 315 def _prepare_proxy(self, conn): | |
| 316 # Nothing to do for HTTP connections. | |
| 317 pass | |
| 318 | |
| 319 def _get_timeout(self, timeout): | |
| 320 """ Helper that always returns a :class:`urllib3.util.Timeout` """ | |
| 321 if timeout is _Default: | |
| 322 return self.timeout.clone() | |
| 323 | |
| 324 if isinstance(timeout, Timeout): | |
| 325 return timeout.clone() | |
| 326 else: | |
| 327 # User passed us an int/float. This is for backwards compatibility, | |
| 328 # can be removed later | |
| 329 return Timeout.from_float(timeout) | |
| 330 | |
    def _raise_timeout(self, err, url, timeout_value):
        """Is the error actually a timeout? Will raise a ReadTimeout or pass"""

        if isinstance(err, SocketTimeout):
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % timeout_value
            )

        # See the above comment about EAGAIN in Python 3. In Python 2 we have
        # to specifically catch it and throw the timeout error
        if hasattr(err, "errno") and err.errno in _blocking_errnos:
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % timeout_value
            )

        # Catch possible read timeouts thrown as SSL errors. If not the
        # case, rethrow the original. We need to do this because of:
        # http://bugs.python.org/issue10272
        if "timed out" in str(err) or "did not complete (read)" in str(
            err
        ):  # Python < 2.7.4
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % timeout_value
            )
        # Otherwise: not a timeout; return normally so the caller can re-raise
        # the original exception.
| 355 | |
    def _make_request(
        self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
    ):
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.
        """
        self.num_requests += 1

        timeout_obj = self._get_timeout(timeout)
        # Start the connect-phase clock; read timeout is applied separately below.
        timeout_obj.start_connect()
        conn.timeout = timeout_obj.connect_timeout

        # Trigger any extra validation we need to do.
        try:
            self._validate_conn(conn)
        except (SocketTimeout, BaseSSLError) as e:
            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
            raise

        # conn.request() calls httplib.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        if chunked:
            conn.request_chunked(method, url, **httplib_request_kw)
        else:
            conn.request(method, url, **httplib_request_kw)

        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout

        # App Engine doesn't have a sock attr
        if getattr(conn, "sock", None):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, "Read timed out. (read timeout=%s)" % read_timeout
                )
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else:  # None or a value
                conn.sock.settimeout(read_timeout)

        # Receive the response from the server
        try:
            try:
                # Python 2.7, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError:
                # Python 3
                try:
                    httplib_response = conn.getresponse()
                except BaseException as e:
                    # Remove the TypeError from the exception chain in
                    # Python 3 (including for exceptions like SystemExit).
                    # Otherwise it looks like a bug in the code.
                    six.raise_from(e, None)
        except (SocketTimeout, BaseSSLError, SocketError) as e:
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise

        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
        log.debug(
            '%s://%s:%s "%s %s %s" %s %s',
            self.scheme,
            self.host,
            self.port,
            method,
            url,
            http_version,
            httplib_response.status,
            httplib_response.length,
        )

        # Header-parsing problems are logged but never fatal: the response is
        # still returned to the caller.
        try:
            assert_header_parsing(httplib_response.msg)
        except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
            log.warning(
                "Failed to parse headers (url=%s): %s",
                self._absolute_url(url),
                hpe,
                exc_info=True,
            )

        return httplib_response
| 456 | |
| 457 def _absolute_url(self, path): | |
| 458 return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url | |
| 459 | |
| 460 def close(self): | |
| 461 """ | |
| 462 Close all pooled connections and disable the pool. | |
| 463 """ | |
| 464 if self.pool is None: | |
| 465 return | |
| 466 # Disable access to the pool | |
| 467 old_pool, self.pool = self.pool, None | |
| 468 | |
| 469 try: | |
| 470 while True: | |
| 471 conn = old_pool.get(block=False) | |
| 472 if conn: | |
| 473 conn.close() | |
| 474 | |
| 475 except queue.Empty: | |
| 476 pass # Done. | |
| 477 | |
    def is_same_host(self, url):
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        # Relative URLs are always on the same host.
        if url.startswith("/"):
            return True

        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, host, port = get_host(url)
        if host is not None:
            host = _normalize_host(host, scheme=scheme)

        # Use explicit default port for comparison when none is given
        if self.port and not port:
            port = port_by_scheme.get(scheme)
        elif not self.port and port == port_by_scheme.get(scheme):
            port = None

        return (scheme, host, port) == (self.scheme, self.host, self.port)
| 498 | |
    def urlopen(
        self,
        method,
        url,
        body=None,
        headers=None,
        retries=None,
        redirect=True,
        assert_same_host=True,
        timeout=_Default,
        pool_timeout=None,
        release_conn=None,
        chunked=False,
        body_pos=None,
        **response_kw
    ):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param int body_pos:
            Position to seek to in file-like body in the event of a retry or
            redirect. Typically this won't need to be set because urllib3 will
            auto-populate the value when needed.

        :param \\**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get("preload_content", True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        # Ensure that the URL we're connecting to is properly encoded
        if url.startswith("/"):
            url = six.ensure_str(_encode_target(url))
        else:
            url = six.ensure_str(parse_url(url).url)

        conn = None

        # Track whether `conn` needs to be released before
        # returning/raising/recursing. Update this variable if necessary, and
        # leave `release_conn` constant throughout the function. That way, if
        # the function recurses, the original value of `release_conn` will be
        # passed down into the recursive call, and its value will be respected.
        #
        # See issue #651 [1] for details.
        #
        # [1] <https://github.com/urllib3/urllib3/issues/651>
        release_this_conn = release_conn

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == "http":
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False

        # Rewind body position, if needed. Record current position
        # for future rewinds in the event of a redirect/retry.
        body_pos = set_file_position(body, body_pos)

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            # A proxied connection with no socket yet still needs its tunnel
            # established before the first request goes out.
            is_new_proxy_conn = self.proxy is not None and not getattr(
                conn, "sock", None
            )
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(
                conn,
                method,
                url,
                timeout=timeout_obj,
                body=body,
                headers=headers,
                chunked=chunked,
            )

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None

            # Pass method to Response for length checking
            response_kw["request_method"] = method

            # Import httplib's response into our own wrapper object
            response = self.ResponseCls.from_httplib(
                httplib_response,
                pool=self,
                connection=response_conn,
                retries=retries,
                **response_kw
            )

            # Everything went great!
            clean_exit = True

        except EmptyPoolError:
            # Didn't get a connection from the pool, no need to clean up
            clean_exit = True
            release_this_conn = False
            raise

        except (
            TimeoutError,
            HTTPException,
            SocketError,
            ProtocolError,
            BaseSSLError,
            SSLError,
            CertificateError,
        ) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            clean_exit = False
            # Translate low-level errors into urllib3's exception hierarchy.
            if isinstance(e, (BaseSSLError, CertificateError)):
                e = SSLError(e)
            elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError("Cannot connect to proxy.", e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError("Connection aborted.", e)

            # increment() re-raises (wrapped in MaxRetryError) once the retry
            # budget is exhausted or retries are disabled.
            retries = retries.increment(
                method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
            )
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                conn = conn and conn.close()
                release_this_conn = True

            if release_this_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning(
                "Retrying (%r) after connection broken by '%r': %s", retries, err, url
            )
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries,
                redirect,
                assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                # RFC-mandated: 303 See Other always switches to GET.
                method = "GET"

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep_for_retry(response)
            log.debug("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method,
                redirect_location,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        # Check if we should retry the HTTP response.
        has_retry_after = bool(response.getheader("Retry-After"))
        if retries.is_retry(method, response.status, has_retry_after):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep(response)
            log.debug("Retry: %s", url)
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        return response
| 834 | |
| 835 | |
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`.HTTPSConnection`.

    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.

    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
    is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """

    scheme = "https"
    # HTTPS pools create HTTPSConnection objects instead of plain HTTP ones.
    ConnectionCls = HTTPSConnection
| 856 | |
    def __init__(
        self,
        host,
        port=None,
        strict=False,
        timeout=Timeout.DEFAULT_TIMEOUT,
        maxsize=1,
        block=False,
        headers=None,
        retries=None,
        _proxy=None,
        _proxy_headers=None,
        key_file=None,
        cert_file=None,
        cert_reqs=None,
        key_password=None,
        ca_certs=None,
        ssl_version=None,
        assert_hostname=None,
        assert_fingerprint=None,
        ca_cert_dir=None,
        **conn_kw
    ):

        # Delegate the HTTP-level configuration to the parent constructor.
        HTTPConnectionPool.__init__(
            self,
            host,
            port,
            strict,
            timeout,
            maxsize,
            block,
            headers,
            retries,
            _proxy,
            _proxy_headers,
            **conn_kw
        )

        # TLS-specific settings, handed to each connection in _prepare_conn().
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
| 905 | |
| 906 def _prepare_conn(self, conn): | |
| 907 """ | |
| 908 Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` | |
| 909 and establish the tunnel if proxy is used. | |
| 910 """ | |
| 911 | |
| 912 if isinstance(conn, VerifiedHTTPSConnection): | |
| 913 conn.set_cert( | |
| 914 key_file=self.key_file, | |
| 915 key_password=self.key_password, | |
| 916 cert_file=self.cert_file, | |
| 917 cert_reqs=self.cert_reqs, | |
| 918 ca_certs=self.ca_certs, | |
| 919 ca_cert_dir=self.ca_cert_dir, | |
| 920 assert_hostname=self.assert_hostname, | |
| 921 assert_fingerprint=self.assert_fingerprint, | |
| 922 ) | |
| 923 conn.ssl_version = self.ssl_version | |
| 924 return conn | |
| 925 | |
| 926 def _prepare_proxy(self, conn): | |
| 927 """ | |
| 928 Establish tunnel connection early, because otherwise httplib | |
| 929 would improperly set Host: header to proxy's IP:port. | |
| 930 """ | |
| 931 conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) | |
| 932 conn.connect() | |
| 933 | |
| 934 def _new_conn(self): | |
| 935 """ | |
| 936 Return a fresh :class:`httplib.HTTPSConnection`. | |
| 937 """ | |
| 938 self.num_connections += 1 | |
| 939 log.debug( | |
| 940 "Starting new HTTPS connection (%d): %s:%s", | |
| 941 self.num_connections, | |
| 942 self.host, | |
| 943 self.port or "443", | |
| 944 ) | |
| 945 | |
| 946 if not self.ConnectionCls or self.ConnectionCls is DummyConnection: | |
| 947 raise SSLError( | |
| 948 "Can't connect to HTTPS URL because the SSL module is not available." | |
| 949 ) | |
| 950 | |
| 951 actual_host = self.host | |
| 952 actual_port = self.port | |
| 953 if self.proxy is not None: | |
| 954 actual_host = self.proxy.host | |
| 955 actual_port = self.proxy.port | |
| 956 | |
| 957 conn = self.ConnectionCls( | |
| 958 host=actual_host, | |
| 959 port=actual_port, | |
| 960 timeout=self.timeout.connect_timeout, | |
| 961 strict=self.strict, | |
| 962 cert_file=self.cert_file, | |
| 963 key_file=self.key_file, | |
| 964 key_password=self.key_password, | |
| 965 **self.conn_kw | |
| 966 ) | |
| 967 | |
| 968 return self._prepare_conn(conn) | |
| 969 | |
| 970 def _validate_conn(self, conn): | |
| 971 """ | |
| 972 Called right before a request is made, after the socket is created. | |
| 973 """ | |
| 974 super(HTTPSConnectionPool, self)._validate_conn(conn) | |
| 975 | |
| 976 # Force connect early to allow us to validate the connection. | |
| 977 if not getattr(conn, "sock", None): # AppEngine might not have `.sock` | |
| 978 conn.connect() | |
| 979 | |
| 980 if not conn.is_verified: | |
| 981 warnings.warn( | |
| 982 ( | |
| 983 "Unverified HTTPS request is being made to host '%s'. " | |
| 984 "Adding certificate verification is strongly advised. See: " | |
| 985 "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" | |
| 986 "#ssl-warnings" % conn.host | |
| 987 ), | |
| 988 InsecureRequestWarning, | |
| 989 ) | |
| 990 | |
| 991 | |
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    # Fall back to the scheme's default port (80 for unknown schemes).
    port = port or port_by_scheme.get(scheme, 80)
    pool_cls = HTTPSConnectionPool if scheme == "https" else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
| 1018 | |
| 1019 | |
def _normalize_host(host, scheme):
    """
    Normalize hosts for comparisons and use with sockets.
    """
    host = normalize_host(host, scheme)

    # httplib doesn't like it when we include brackets in IPv6 addresses.
    # Specifically, if brackets are present and a port is also passed,
    # httplib doubles up the square brackets on the Host header. We work
    # around that by stripping the brackets and making sure we never pass
    # ``None`` as the port — though for backward compatibility reasons we
    # can't actually *assert* the latter.
    # See http://bugs.python.org/issue28539
    if host.startswith("[") and host.endswith("]"):
        return host[1:-1]
    return host
