1"""Parse (absolute and relative) URLs. 2 3urlparse module is based upon the following RFC specifications. 4 5RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding 6and L. Masinter, January 2005. 7 8RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter 9and L.Masinter, December 1999. 10 11RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. 12Berners-Lee, R. Fielding, and L. Masinter, August 1998. 13 14RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. 15 16RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June 171995. 18 19RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. 20McCahill, December 1994 21 22RFC 3986 is considered the current standard and any future changes to 23urlparse module should conform with it. The urlparse module is 24currently not entirely compliant with this RFC due to defacto 25scenarios for parsing, and for backward compatibility purposes, some 26parsing quirks from older RFCs are retained. The testcases in 27test_urlparse.py provides a good indicator of parsing behavior. 28 29The WHATWG URL Parser spec should also be considered. We are not compliant with 30it either due to existing user code API behavior expectations (Hyrum's Law). 31It serves as a useful guide when making changes. 32""" 33 34from collections import namedtuple 35import functools 36import re 37import sys 38import types 39import warnings 40import ipaddress 41 42__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", 43 "urlsplit", "urlunsplit", "urlencode", "parse_qs", 44 "parse_qsl", "quote", "quote_plus", "quote_from_bytes", 45 "unquote", "unquote_plus", "unquote_to_bytes", 46 "DefragResult", "ParseResult", "SplitResult", 47 "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] 48 49# A classification of schemes. 50# The empty string classifies URLs with no scheme specified, 51# being the default value returned by “urlsplit” and “urlparse”. 52 53uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap', 54 'wais', 'file', 'https', 'shttp', 'mms', 55 'prospero', 'rtsp', 'rtspu', 'sftp', 56 'svn', 'svn+ssh', 'ws', 'wss'] 57 58uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet', 59 'imap', 'wais', 'file', 'mms', 'https', 'shttp', 60 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', 61 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh', 62 'ws', 'wss'] 63 64uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap', 65 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', 66 'mms', 'sftp', 'tel'] 67 68# These are not actually used anymore, but should stay for backwards 69# compatibility. (They are undocumented, but have a public-looking name.) 70 71non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', 72 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] 73 74uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms', 75 'gopher', 'rtsp', 'rtspu', 'sip', 'sips'] 76 77uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news', 78 'nntp', 'wais', 'https', 'shttp', 'snews', 79 'file', 'prospero'] 80 81# Characters valid in scheme names 82scheme_chars = ('abcdefghijklmnopqrstuvwxyz' 83 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 84 '0123456789' 85 '+-.') 86 87# Leading and trailing C0 control and space to be stripped per WHATWG spec. 
88# == "".join([chr(i) for i in range(0, 0x20 + 1)]) 89_WHATWG_C0_CONTROL_OR_SPACE = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ' 90 91# Unsafe bytes to be removed per WHATWG spec 92_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n'] 93 94def clear_cache(): 95 """Clear internal performance caches. Undocumented; some tests want it.""" 96 urlsplit.cache_clear() 97 _byte_quoter_factory.cache_clear() 98 99# Helpers for bytes handling 100# For 3.2, we deliberately require applications that 101# handle improperly quoted URLs to do their own 102# decoding and encoding. If valid use cases are 103# presented, we may relax this by using latin-1 104# decoding internally for 3.3 105_implicit_encoding = 'ascii' 106_implicit_errors = 'strict' 107 108def _noop(obj): 109 return obj 110 111def _encode_result(obj, encoding=_implicit_encoding, 112 errors=_implicit_errors): 113 return obj.encode(encoding, errors) 114 115def _decode_args(args, encoding=_implicit_encoding, 116 errors=_implicit_errors): 117 return tuple(x.decode(encoding, errors) if x else '' for x in args) 118 119def _coerce_args(*args): 120 # Invokes decode if necessary to create str args 121 # and returns the coerced inputs along with 122 # an appropriate result coercion function 123 # - noop for str inputs 124 # - encoding function otherwise 125 str_input = isinstance(args[0], str) 126 for arg in args[1:]: 127 # We special-case the empty string to support the 128 # "scheme=''" default argument to some functions 129 if arg and isinstance(arg, str) != str_input: 130 raise TypeError("Cannot mix str and non-str arguments") 131 if str_input: 132 return args + (_noop,) 133 return _decode_args(args) + (_encode_result,) 134 135# Result objects are more helpful than simple tuples 136class _ResultMixinStr(object): 137 """Standard approach to encoding parsed results from str to bytes""" 138 __slots__ = () 139 140 def encode(self, encoding='ascii', errors='strict'): 141 return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) 142 143 144class _ResultMixinBytes(object): 145 """Standard approach to decoding parsed results from bytes to str""" 146 __slots__ = () 147 148 def decode(self, encoding='ascii', errors='strict'): 149 return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) 150 151 152class _NetlocResultMixinBase(object): 153 """Shared methods for the parsed result objects containing a netloc element""" 154 __slots__ = () 155 156 @property 157 def username(self): 158 return self._userinfo[0] 159 160 @property 161 def password(self): 162 return self._userinfo[1] 163 164 @property 165 def hostname(self): 166 hostname = self._hostinfo[0] 167 if not hostname: 168 return None 169 # Scoped IPv6 address may have zone info, which must not be lowercased 170 # like http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys 171 separator = '%' if isinstance(hostname, str) else b'%' 172 hostname, percent, zone = hostname.partition(separator) 173 return hostname.lower() + percent + zone 174 175 @property 176 def port(self): 177 port = self._hostinfo[1] 178 if port is not None: 179 if port.isdigit() and port.isascii(): 180 port = int(port) 181 else: 182 raise ValueError(f"Port could not be cast to integer value as {port!r}") 183 if not (0 <= port <= 65535): 184 raise ValueError("Port out of range 0-65535") 185 return port 186 187 __class_getitem__ = classmethod(types.GenericAlias) 188 189 190class _NetlocResultMixinStr(_NetlocResultMixinBase, 


class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
    __slots__ = ()

    @property
    def _userinfo(self):
        netloc = self.netloc
        userinfo, have_info, hostinfo = netloc.rpartition('@')
        if have_info:
            username, have_password, password = userinfo.partition(':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition('@')
        _, have_open_br, bracketed = hostinfo.partition('[')
        if have_open_br:
            hostname, _, port = bracketed.partition(']')
            _, _, port = port.partition(':')
        else:
            hostname, _, port = hostinfo.partition(':')
        if not port:
            port = None
        return hostname, port


class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
    __slots__ = ()

    @property
    def _userinfo(self):
        netloc = self.netloc
        userinfo, have_info, hostinfo = netloc.rpartition(b'@')
        if have_info:
            username, have_password, password = userinfo.partition(b':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition(b'@')
        _, have_open_br, bracketed = hostinfo.partition(b'[')
        if have_open_br:
            hostname, _, port = bracketed.partition(b']')
            _, _, port = port.partition(b':')
        else:
            hostname, _, port = hostinfo.partition(b':')
        if not port:
            port = None
        return hostname, port


_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple(
    'SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple(
    'ParseResult', 'scheme netloc path params query fragment')

_DefragResultBase.__doc__ = """
DefragResult(url, fragment)

A 2-tuple that contains the URL without the fragment identifier and the
fragment identifier as a separate argument.
"""

_DefragResultBase.url.__doc__ = """The URL with no fragment identifier."""

_DefragResultBase.fragment.__doc__ = """
Fragment identifier separated from the URL, which allows indirect
identification of a secondary resource by reference to a primary resource and
additional identifying information.
"""

_SplitResultBase.__doc__ = """
SplitResult(scheme, netloc, path, query, fragment)

A 5-tuple that contains the different components of a URL. Similar to
ParseResult, but does not split params.
"""

_SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request."""

_SplitResultBase.netloc.__doc__ = """
Network location where the request is made to.
"""

_SplitResultBase.path.__doc__ = """
The hierarchical path, such as the path to a file to download.
"""

_SplitResultBase.query.__doc__ = """
The query component, which contains non-hierarchical data that, along with the
data in the path component, identifies a resource in the scope of the URI's
scheme and network location.
"""

_SplitResultBase.fragment.__doc__ = """
Fragment identifier, which allows indirect identification of a secondary
resource by reference to a primary resource and additional identifying
information.
"""

_ParseResultBase.__doc__ = """
ParseResult(scheme, netloc, path, params, query, fragment)

A 6-tuple that contains components of a parsed URL.
303""" 304 305_ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__ 306_ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__ 307_ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__ 308_ParseResultBase.params.__doc__ = """ 309Parameters for last path element used to dereference the URI in order to provide 310access to perform some operation on the resource. 311""" 312 313_ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__ 314_ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__ 315 316 317# For backwards compatibility, alias _NetlocResultMixinStr 318# ResultBase is no longer part of the documented API, but it is 319# retained since deprecating it isn't worth the hassle 320ResultBase = _NetlocResultMixinStr 321 322# Structured result objects for string data 323class DefragResult(_DefragResultBase, _ResultMixinStr): 324 __slots__ = () 325 def geturl(self): 326 if self.fragment: 327 return self.url + '#' + self.fragment 328 else: 329 return self.url 330 331class SplitResult(_SplitResultBase, _NetlocResultMixinStr): 332 __slots__ = () 333 def geturl(self): 334 return urlunsplit(self) 335 336class ParseResult(_ParseResultBase, _NetlocResultMixinStr): 337 __slots__ = () 338 def geturl(self): 339 return urlunparse(self) 340 341# Structured result objects for bytes data 342class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): 343 __slots__ = () 344 def geturl(self): 345 if self.fragment: 346 return self.url + b'#' + self.fragment 347 else: 348 return self.url 349 350class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): 351 __slots__ = () 352 def geturl(self): 353 return urlunsplit(self) 354 355class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): 356 __slots__ = () 357 def geturl(self): 358 return urlunparse(self) 359 360# Set up the encode/decode result pairs 361def _fix_result_transcoding(): 362 _result_pairs = ( 363 (DefragResult, DefragResultBytes), 364 (SplitResult, SplitResultBytes), 365 (ParseResult, ParseResultBytes), 366 ) 367 for _decoded, _encoded in _result_pairs: 368 _decoded._encoded_counterpart = _encoded 369 _encoded._decoded_counterpart = _decoded 370 371_fix_result_transcoding() 372del _fix_result_transcoding 373 374def urlparse(url, scheme='', allow_fragments=True): 375 """Parse a URL into 6 components: 376 <scheme>://<netloc>/<path>;<params>?<query>#<fragment> 377 378 The result is a named 6-tuple with fields corresponding to the 379 above. It is either a ParseResult or ParseResultBytes object, 380 depending on the type of the url parameter. 381 382 The username, password, hostname, and port sub-components of netloc 383 can also be accessed as attributes of the returned object. 384 385 The scheme argument provides the default value of the scheme 386 component when no scheme is found in url. 387 388 If allow_fragments is False, no attempt is made to separate the 389 fragment component from the previous component, which can be either 390 path or query. 391 392 Note that % escapes are not expanded. 
393 """ 394 url, scheme, _coerce_result = _coerce_args(url, scheme) 395 splitresult = urlsplit(url, scheme, allow_fragments) 396 scheme, netloc, url, query, fragment = splitresult 397 if scheme in uses_params and ';' in url: 398 url, params = _splitparams(url) 399 else: 400 params = '' 401 result = ParseResult(scheme, netloc, url, params, query, fragment) 402 return _coerce_result(result) 403 404def _splitparams(url): 405 if '/' in url: 406 i = url.find(';', url.rfind('/')) 407 if i < 0: 408 return url, '' 409 else: 410 i = url.find(';') 411 return url[:i], url[i+1:] 412 413def _splitnetloc(url, start=0): 414 delim = len(url) # position of end of domain part of url, default is end 415 for c in '/?#': # look for delimiters; the order is NOT important 416 wdelim = url.find(c, start) # find first of this delim 417 if wdelim >= 0: # if found 418 delim = min(delim, wdelim) # use earliest delim position 419 return url[start:delim], url[delim:] # return (domain, rest) 420 421def _checknetloc(netloc): 422 if not netloc or netloc.isascii(): 423 return 424 # looking for characters like \u2100 that expand to 'a/c' 425 # IDNA uses NFKC equivalence, so normalize for this check 426 import unicodedata 427 n = netloc.replace('@', '') # ignore characters already included 428 n = n.replace(':', '') # but not the surrounding text 429 n = n.replace('#', '') 430 n = n.replace('?', '') 431 netloc2 = unicodedata.normalize('NFKC', n) 432 if n == netloc2: 433 return 434 for c in '/?#@:': 435 if c in netloc2: 436 raise ValueError("netloc '" + netloc + "' contains invalid " + 437 "characters under NFKC normalization") 438 439def _check_bracketed_netloc(netloc): 440 # Note that this function must mirror the splitting 441 # done in NetlocResultMixins._hostinfo(). 442 hostname_and_port = netloc.rpartition('@')[2] 443 before_bracket, have_open_br, bracketed = hostname_and_port.partition('[') 444 if have_open_br: 445 # No data is allowed before a bracket. 446 if before_bracket: 447 raise ValueError("Invalid IPv6 URL") 448 hostname, _, port = bracketed.partition(']') 449 # No data is allowed after the bracket but before the port delimiter. 450 if port and not port.startswith(":"): 451 raise ValueError("Invalid IPv6 URL") 452 else: 453 hostname, _, port = hostname_and_port.partition(':') 454 _check_bracketed_host(hostname) 455 456# Valid bracketed hosts are defined in 457# https://www.rfc-editor.org/rfc/rfc3986#page-49 and https://url.spec.whatwg.org/ 458def _check_bracketed_host(hostname): 459 if hostname.startswith('v'): 460 if not re.match(r"\Av[a-fA-F0-9]+\..+\Z", hostname): 461 raise ValueError(f"IPvFuture address is invalid") 462 else: 463 ip = ipaddress.ip_address(hostname) # Throws Value Error if not IPv6 or IPv4 464 if isinstance(ip, ipaddress.IPv4Address): 465 raise ValueError(f"An IPv4 address cannot be in brackets") 466 467# typed=True avoids BytesWarnings being emitted during cache key 468# comparison since this API supports both bytes and str input. 469@functools.lru_cache(typed=True) 470def urlsplit(url, scheme='', allow_fragments=True): 471 """Parse a URL into 5 components: 472 <scheme>://<netloc>/<path>?<query>#<fragment> 473 474 The result is a named 5-tuple with fields corresponding to the 475 above. It is either a SplitResult or SplitResultBytes object, 476 depending on the type of the url parameter. 477 478 The username, password, hostname, and port sub-components of netloc 479 can also be accessed as attributes of the returned object. 

    The scheme argument provides the default value of the scheme
    component when no scheme is found in url.

    If allow_fragments is False, no attempt is made to separate the
    fragment component from the previous component, which can be either
    path or query.

    Note that % escapes are not expanded.
    """

    url, scheme, _coerce_result = _coerce_args(url, scheme)
    # Only lstrip url as some applications rely on preserving trailing space.
    # (https://url.spec.whatwg.org/#concept-basic-url-parser would strip both)
    url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE)
    scheme = scheme.strip(_WHATWG_C0_CONTROL_OR_SPACE)

    for b in _UNSAFE_URL_BYTES_TO_REMOVE:
        url = url.replace(b, "")
        scheme = scheme.replace(b, "")

    allow_fragments = bool(allow_fragments)
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0 and url[0].isascii() and url[0].isalpha():
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            scheme, url = url[:i].lower(), url[i+1:]
    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
        if '[' in netloc and ']' in netloc:
            _check_bracketed_netloc(netloc)
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    _checknetloc(netloc)
    v = SplitResult(scheme, netloc, url, query, fragment)
    return _coerce_result(v)

def urlunparse(components):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment, _coerce_result = (
        _coerce_args(*components))
    if params:
        url = "%s;%s" % (url, params)
    return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
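
# A round-trip sketch (illustrative): re-joining the result of urlsplit() with
# urlunsplit() yields an equivalent URL, although redundant delimiters such as
# a '?' with an empty query may be dropped.
#
#     urlunsplit(urlsplit('http://host/path?'))   # -> 'http://host/path'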

def urlunsplit(components):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment, _coerce_result = (
        _coerce_args(*components))
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return _coerce_result(url)

def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base

    base, url, _coerce_result = _coerce_args(base, url)
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
        urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
        urlparse(url, bscheme, allow_fragments)

    if scheme != bscheme or scheme not in uses_relative:
        return _coerce_result(url)
    if scheme in uses_netloc:
        if netloc:
            return _coerce_result(urlunparse((scheme, netloc, path,
                                              params, query, fragment)))
        netloc = bnetloc

    if not path and not params:
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))

    base_parts = bpath.split('/')
    if base_parts[-1] != '':
        # the last item is not a directory, so will not be taken into account
        # in resolving the relative path
        del base_parts[-1]

    # For RFC 3986, ignore the base path entirely when the reference's path
    # begins with '/' (i.e. it is rooted).
    if path[:1] == '/':
        segments = path.split('/')
    else:
        segments = base_parts + path.split('/')
        # filter out elements that would cause redundant slashes on re-joining
        # the resolved_path
        segments[1:-1] = filter(None, segments[1:-1])

    resolved_path = []

    for seg in segments:
        if seg == '..':
            try:
                resolved_path.pop()
            except IndexError:
                # ignore any .. segments that would otherwise cause an IndexError
                # when popped from resolved_path if resolving for rfc3986
                pass
        elif seg == '.':
            continue
        else:
            resolved_path.append(seg)

    if segments[-1] in ('.', '..'):
        # do some post-processing here. if the last segment was a relative dir,
        # then we need to append the trailing '/'
        resolved_path.append('')

    return _coerce_result(urlunparse((scheme, netloc, '/'.join(
        resolved_path) or '/', params, query, fragment)))


def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    url, _coerce_result = _coerce_args(url)
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
    else:
        frag = ''
        defrag = url
    return _coerce_result(DefragResult(defrag, frag))

_hexdig = '0123456789ABCDEFabcdef'
_hextobyte = None
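
# A decoding sketch (illustrative): sequences that are not valid %xx escapes
# are passed through unchanged rather than raising an error.
#
#     unquote('abc%20def')            # -> 'abc def'
#     unquote('100%zz')               # -> '100%zz'  (not a valid percent escape)
#     unquote_to_bytes('a%C3%A9')     # -> b'a\xc3\xa9'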

def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if the string
    # contains unescaped non-ASCII characters, which URIs should not.
    if not string:
        # Is it a string-like object?
        string.split
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    bits = string.split(b'%')
    if len(bits) == 1:
        return string
    res = [bits[0]]
    append = res.append
    # Delay the initialization of the table to not waste memory
    # if the function is never called
    global _hextobyte
    if _hextobyte is None:
        _hextobyte = {(a + b).encode(): bytes.fromhex(a + b)
                      for a in _hexdig for b in _hexdig}
    for item in bits[1:]:
        try:
            append(_hextobyte[item[:2]])
            append(item[2:])
        except KeyError:
            append(b'%')
            append(item)
    return b''.join(res)

_asciire = re.compile('([\x00-\x7f]+)')

def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.
    By default, percent-encoded sequences are decoded with UTF-8, and invalid
    sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if isinstance(string, bytes):
        return unquote_to_bytes(string).decode(encoding, errors)
    if '%' not in string:
        # Is it a string-like object?
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    bits = _asciire.split(string)
    res = [bits[0]]
    append = res.append
    for i in range(1, len(bits), 2):
        append(unquote_to_bytes(bits[i]).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)


def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace', max_num_fields=None, separator='&'):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings.  The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    max_num_fields: int. If set, then raises a ValueError if there
        are more than n fields read by parse_qsl().

    separator: str. The symbol to use for separating the query arguments.
        Defaults to &.

    Returns a dictionary.
    """
    parsed_result = {}
    pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
                      encoding=encoding, errors=errors,
                      max_num_fields=max_num_fields, separator=separator)
    for name, value in pairs:
        if name in parsed_result:
            parsed_result[name].append(value)
        else:
            parsed_result[name] = [value]
    return parsed_result
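
# A usage sketch (illustrative): repeated names are collected into lists, and
# blank values are dropped unless keep_blank_values is true.
#
#     parse_qs('a=1&a=2&b=')                          # -> {'a': ['1', '2']}
#     parse_qs('a=1&a=2&b=', keep_blank_values=True)  # -> {'a': ['1', '2'], 'b': ['']}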


def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
              encoding='utf-8', errors='replace', max_num_fields=None, separator='&'):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    max_num_fields: int. If set, then raises a ValueError
        if there are more than n fields read by parse_qsl().

    separator: str. The symbol to use for separating the query arguments.
        Defaults to &.

    Returns a list of (name, value) pairs.
    """
    qs, _coerce_result = _coerce_args(qs)
    separator, _ = _coerce_args(separator)

    if not separator or (not isinstance(separator, (str, bytes))):
        raise ValueError("Separator must be of type string or bytes.")

    # If max_num_fields is defined then check that the number of fields
    # is less than max_num_fields. This prevents a memory exhaustion DOS
    # attack via post bodies with many fields.
    if max_num_fields is not None:
        num_fields = 1 + qs.count(separator) if qs else 0
        if max_num_fields < num_fields:
            raise ValueError('Max number of fields exceeded')

    r = []
    query_args = qs.split(separator) if qs else []
    for name_value in query_args:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = nv[0].replace('+', ' ')
            name = unquote(name, encoding=encoding, errors=errors)
            name = _coerce_result(name)
            value = nv[1].replace('+', ' ')
            value = unquote(value, encoding=encoding, errors=errors)
            value = _coerce_result(value)
            r.append((name, value))
    return r

def unquote_plus(string, encoding='utf-8', errors='replace'):
    """Like unquote(), but also replace plus signs by spaces, as required for
    unquoting HTML form values.

    unquote_plus('%7e/abc+def') -> '~/abc def'
    """
    string = string.replace('+', ' ')
    return unquote(string, encoding, errors)

_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         b'abcdefghijklmnopqrstuvwxyz'
                         b'0123456789'
                         b'_.-~')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)

def __getattr__(name):
    if name == 'Quoter':
        warnings.warn('Deprecated in 3.11. '
                      'urllib.parse.Quoter will be removed in Python 3.14. '
                      'It was not intended to be a public API.',
                      DeprecationWarning, stacklevel=2)
        return _Quoter
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')

class _Quoter(dict):
    """A mapping from byte values (in range(0, 256)) to strings.

    String values are percent-encoded byte values, unless the key < 128 and
    is in either the specified safe set or the always-safe set.
    """
    # Keeps a cache internally, via __missing__, for efficiency (lookups
    # of cached keys don't call Python code at all).
    def __init__(self, safe):
        """safe: bytes object."""
        self.safe = _ALWAYS_SAFE.union(safe)

    def __repr__(self):
        return f"<Quoter {dict(self)!r}>"

    def __missing__(self, b):
        # Handle a cache miss. Store quoted string in cache and return.
        res = chr(b) if b in self.safe else '%{:02X}'.format(b)
        self[b] = res
        return res

def quote(string, safe='/', encoding=None, errors=None):
    """quote('abc def') -> 'abc%20def'

    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted. The
    quote function offers a cautious (not minimal) way to quote a
    string for most of these parts.

    RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists
    the following (un)reserved characters.

    unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    reserved    = gen-delims / sub-delims
    gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
                / "*" / "+" / "," / ";" / "="

    Each of the reserved characters is reserved in some component of a URL,
    but not necessarily in all of them.

    The quote function %-escapes all characters that are neither in the
    unreserved chars ("always safe") nor the additional chars set via the
    safe arg.

    The default for the safe arg is '/'. The character is reserved, but in
    typical usage the quote function is being called on a path where the
    existing slash characters are to be preserved.

    Python 3.7 updated from RFC 2396 to RFC 3986 for quoting URL strings.
    Now, "~" is included in the set of unreserved characters.

    string and safe may be either str or bytes objects. encoding and errors
    must not be specified if string is a bytes object.

    The optional encoding and errors parameters specify how to deal with
    non-ASCII characters, as accepted by the str.encode method.
    By default, encoding='utf-8' (characters are encoded with UTF-8), and
    errors='strict' (unsupported characters raise a UnicodeEncodeError).
    """
    if isinstance(string, str):
        if not string:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'strict'
        string = string.encode(encoding, errors)
    else:
        if encoding is not None:
            raise TypeError("quote() doesn't support 'encoding' for bytes")
        if errors is not None:
            raise TypeError("quote() doesn't support 'errors' for bytes")
    return quote_from_bytes(string, safe)

def quote_plus(string, safe='', encoding=None, errors=None):
    """Like quote(), but also replace ' ' with '+', as required for quoting
    HTML form values.  Plus signs in the original string are escaped unless
    they are included in safe.  Also, unlike quote(), safe does not default
    to '/'.
    """
    # Check if ' ' in string, where string may either be a str or bytes.  If
    # there are no spaces, the regular quote will produce the right answer.
    if ((isinstance(string, str) and ' ' not in string) or
            (isinstance(string, bytes) and b' ' not in string)):
        return quote(string, safe, encoding, errors)
    if isinstance(safe, str):
        space = ' '
    else:
        space = b' '
    string = quote(string, safe + space, encoding, errors)
    return string.replace(' ', '+')
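
# A quoting sketch (illustrative): characters outside the always-safe set and
# the caller-supplied safe set are percent-encoded; quote_plus() additionally
# turns spaces into '+'.
#
#     quote('/some path/')            # -> '/some%20path/'   ('/' is safe by default)
#     quote('/some path/', safe='')   # -> '%2Fsome%20path%2F'
#     quote_plus('a b&c')             # -> 'a+b%26c'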

# Expectation: A typical program is unlikely to create more than 5 of these.
@functools.lru_cache
def _byte_quoter_factory(safe):
    return _Quoter(safe).__getitem__

def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return ''
    if isinstance(safe, str):
        # Normalize 'safe' by converting to bytes and removing non-ASCII chars
        safe = safe.encode('ascii', 'ignore')
    else:
        # List comprehensions are faster than generator expressions.
        safe = bytes([c for c in safe if c < 128])
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        return bs.decode()
    quoter = _byte_quoter_factory(safe)
    return ''.join([quoter(char) for char in bs])

def urlencode(query, doseq=False, safe='', encoding=None, errors=None,
              quote_via=quote_plus):
    """Encode a dict or sequence of two-element tuples into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    The components of a query arg may each be either a string or a bytes type.

    The safe, encoding, and errors parameters are passed down to the function
    specified by quote_via (encoding and errors only if a component is a str).
    """

    if hasattr(query, "items"):
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences.
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # Zero-length sequences of all types will get here and succeed,
            # but that's a minor nit.  Since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError as err:
            raise TypeError("not a valid non-string sequence "
                            "or mapping object") from err

    l = []
    if not doseq:
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_via(k, safe)
            else:
                k = quote_via(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_via(v, safe)
            else:
                v = quote_via(str(v), safe, encoding, errors)
            l.append(k + '=' + v)
    else:
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_via(k, safe)
            else:
                k = quote_via(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_via(v, safe)
                l.append(k + '=' + v)
            elif isinstance(v, str):
                v = quote_via(v, safe, encoding, errors)
                l.append(k + '=' + v)
            else:
                try:
                    # Is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_via(str(v), safe, encoding, errors)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        if isinstance(elt, bytes):
                            elt = quote_via(elt, safe)
                        else:
                            elt = quote_via(str(elt), safe, encoding, errors)
                        l.append(k + '=' + elt)
    return '&'.join(l)


def to_bytes(url):
    warnings.warn("urllib.parse.to_bytes() is deprecated as of 3.8",
                  DeprecationWarning, stacklevel=2)
    return _to_bytes(url)


def _to_bytes(url):
    """to_bytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed.
    # XXX get rid of to_bytes()
    if isinstance(url, str):
        try:
            url = url.encode("ASCII").decode()
        except UnicodeError:
            raise UnicodeError("URL " + repr(url) +
                               " contains non-ASCII characters")
    return url


def unwrap(url):
    """Transform a string like '<URL:scheme://host/path>' into 'scheme://host/path'.

    The string is returned unchanged if it's not a wrapped URL.
    """
    url = str(url).strip()
    if url[:1] == '<' and url[-1:] == '>':
        url = url[1:-1].strip()
    if url[:4] == 'URL:':
        url = url[4:].strip()
    return url


def splittype(url):
    warnings.warn("urllib.parse.splittype() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splittype(url)


_typeprog = None
def _splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    if _typeprog is None:
        _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL)

    match = _typeprog.match(url)
    if match:
        scheme, data = match.groups()
        return scheme.lower(), data
    return None, url


def splithost(url):
    warnings.warn("urllib.parse.splithost() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splithost(url)


_hostprog = None
def _splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL)

    match = _hostprog.match(url)
    if match:
        host_port, path = match.groups()
        if path and path[0] != '/':
            path = '/' + path
        return host_port, path
    return None, url


def splituser(host):
    warnings.warn("urllib.parse.splituser() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splituser(host)


def _splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    user, delim, host = host.rpartition('@')
    return (user if delim else None), host


def splitpasswd(user):
    warnings.warn("urllib.parse.splitpasswd() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splitpasswd(user)


def _splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    user, delim, passwd = user.partition(':')
    return user, (passwd if delim else None)
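
# A sketch of the legacy splitting helpers (illustrative); the public names are
# deprecated in favour of urlsplit()/urlparse():
#
#     _splituser('user:pw@host:8080')   # -> ('user:pw', 'host:8080')
#     _splitport('host:8080')           # -> ('host', '8080')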
instead", 1139 DeprecationWarning, stacklevel=2) 1140 return _splitport(host) 1141 1142 1143# splittag('/path#tag') --> '/path', 'tag' 1144_portprog = None 1145def _splitport(host): 1146 """splitport('host:port') --> 'host', 'port'.""" 1147 global _portprog 1148 if _portprog is None: 1149 _portprog = re.compile('(.*):([0-9]*)', re.DOTALL) 1150 1151 match = _portprog.fullmatch(host) 1152 if match: 1153 host, port = match.groups() 1154 if port: 1155 return host, port 1156 return host, None 1157 1158 1159def splitnport(host, defport=-1): 1160 warnings.warn("urllib.parse.splitnport() is deprecated as of 3.8, " 1161 "use urllib.parse.urlparse() instead", 1162 DeprecationWarning, stacklevel=2) 1163 return _splitnport(host, defport) 1164 1165 1166def _splitnport(host, defport=-1): 1167 """Split host and port, returning numeric port. 1168 Return given default port if no ':' found; defaults to -1. 1169 Return numerical port if a valid number is found after ':'. 1170 Return None if ':' but not a valid number.""" 1171 host, delim, port = host.rpartition(':') 1172 if not delim: 1173 host = port 1174 elif port: 1175 if port.isdigit() and port.isascii(): 1176 nport = int(port) 1177 else: 1178 nport = None 1179 return host, nport 1180 return host, defport 1181 1182 1183def splitquery(url): 1184 warnings.warn("urllib.parse.splitquery() is deprecated as of 3.8, " 1185 "use urllib.parse.urlparse() instead", 1186 DeprecationWarning, stacklevel=2) 1187 return _splitquery(url) 1188 1189 1190def _splitquery(url): 1191 """splitquery('/path?query') --> '/path', 'query'.""" 1192 path, delim, query = url.rpartition('?') 1193 if delim: 1194 return path, query 1195 return url, None 1196 1197 1198def splittag(url): 1199 warnings.warn("urllib.parse.splittag() is deprecated as of 3.8, " 1200 "use urllib.parse.urlparse() instead", 1201 DeprecationWarning, stacklevel=2) 1202 return _splittag(url) 1203 1204 1205def _splittag(url): 1206 """splittag('/path#tag') --> '/path', 'tag'.""" 1207 path, delim, tag = url.rpartition('#') 1208 if delim: 1209 return path, tag 1210 return url, None 1211 1212 1213def splitattr(url): 1214 warnings.warn("urllib.parse.splitattr() is deprecated as of 3.8, " 1215 "use urllib.parse.urlparse() instead", 1216 DeprecationWarning, stacklevel=2) 1217 return _splitattr(url) 1218 1219 1220def _splitattr(url): 1221 """splitattr('/path;attr1=value1;attr2=value2;...') -> 1222 '/path', ['attr1=value1', 'attr2=value2', ...].""" 1223 words = url.split(';') 1224 return words[0], words[1:] 1225 1226 1227def splitvalue(attr): 1228 warnings.warn("urllib.parse.splitvalue() is deprecated as of 3.8, " 1229 "use urllib.parse.parse_qsl() instead", 1230 DeprecationWarning, stacklevel=2) 1231 return _splitvalue(attr) 1232 1233 1234def _splitvalue(attr): 1235 """splitvalue('attr=value') --> 'attr', 'value'.""" 1236 attr, delim, value = attr.partition('=') 1237 return attr, (value if delim else None) 1238