• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1"""functools.py - Tools for working with functions and callable objects
2"""
3# Python module wrapper for _functools C module
4# to allow utilities written in Python to be added
5# to the functools module.
6# Written by Nick Coghlan <ncoghlan at gmail.com>,
7# Raymond Hettinger <python at rcn.com>,
8# and Łukasz Langa <lukasz at langa.pl>.
9#   Copyright (C) 2006-2013 Python Software Foundation.
10# See C source code for _functools credits/copyright
11
# Public names re-exported by "from functools import *"; keep in sync with
# the definitions below.
__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
           'total_ordering', 'cache', 'cmp_to_key', 'lru_cache', 'reduce',
           'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod',
           'cached_property']
16
17from abc import get_cache_token
18from collections import namedtuple
19# import types, weakref  # Deferred to single_dispatch()
20from reprlib import recursive_repr
21from _thread import RLock
22
# Avoid importing types, so we can speedup import time
# (type(list[int]) is types.GenericAlias without paying for the import).
GenericAlias = type(list[int])
25
26################################################################################
27### update_wrapper() and wraps() decorator
28################################################################################
29
30# update_wrapper() and wraps() are tools to help write
31# wrapper functions that can handle naive introspection
32
# Attributes copied verbatim from the wrapped function onto the wrapper.
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
                       '__annotations__', '__type_params__')
# Wrapper attributes merged (via dict.update) with the wrapped function's.
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Update a wrapper function to look like the wrapped function

       wrapper is the function to be updated
       wrapped is the original function
       assigned is a tuple naming the attributes assigned directly
       from the wrapped function to the wrapper function (defaults to
       functools.WRAPPER_ASSIGNMENTS)
       updated is a tuple naming the attributes of the wrapper that
       are updated with the corresponding attribute from the wrapped
       function (defaults to functools.WRAPPER_UPDATES)
    """
    # Copy each attribute that the wrapped function actually has; silently
    # skip the ones it lacks.
    for name in assigned:
        try:
            attr_value = getattr(wrapped, name)
        except AttributeError:
            continue
        setattr(wrapper, name, attr_value)
    # Merge dict-like attributes instead of replacing them.
    for name in updated:
        getattr(wrapper, name).update(getattr(wrapped, name, {}))
    # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
    # from the wrapped function when updating __dict__
    wrapper.__wrapped__ = wrapped
    # Return the wrapper so this can be used as a decorator via partial()
    return wrapper
65
def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Decorator factory to apply update_wrapper() to a wrapper function

       Returns a decorator that invokes update_wrapper() with the decorated
       function as the wrapper argument and the arguments to wraps() as the
       remaining arguments. Default arguments are as for update_wrapper().
       This is a convenience function to simplify applying partial() to
       update_wrapper().
    """
    # A partial application of update_wrapper() is itself the decorator:
    # calling it with the wrapper function fills in the one missing argument.
    decorator = partial(update_wrapper, wrapped=wrapped,
                        assigned=assigned, updated=updated)
    return decorator
79
80
81################################################################################
82### total_ordering class decorator
83################################################################################
84
85# The total ordering functions all invoke the root magic method directly
86# rather than using the corresponding operator.  This avoids possible
87# infinite recursion that could occur when the operator dispatch logic
88# detects a NotImplemented result and then calls a reflected method.
89
90def _gt_from_lt(self, other):
91    'Return a > b.  Computed by @total_ordering from (not a < b) and (a != b).'
92    op_result = type(self).__lt__(self, other)
93    if op_result is NotImplemented:
94        return op_result
95    return not op_result and self != other
96
97def _le_from_lt(self, other):
98    'Return a <= b.  Computed by @total_ordering from (a < b) or (a == b).'
99    op_result = type(self).__lt__(self, other)
100    if op_result is NotImplemented:
101        return op_result
102    return op_result or self == other
103
104def _ge_from_lt(self, other):
105    'Return a >= b.  Computed by @total_ordering from (not a < b).'
106    op_result = type(self).__lt__(self, other)
107    if op_result is NotImplemented:
108        return op_result
109    return not op_result
110
111def _ge_from_le(self, other):
112    'Return a >= b.  Computed by @total_ordering from (not a <= b) or (a == b).'
113    op_result = type(self).__le__(self, other)
114    if op_result is NotImplemented:
115        return op_result
116    return not op_result or self == other
117
118def _lt_from_le(self, other):
119    'Return a < b.  Computed by @total_ordering from (a <= b) and (a != b).'
120    op_result = type(self).__le__(self, other)
121    if op_result is NotImplemented:
122        return op_result
123    return op_result and self != other
124
125def _gt_from_le(self, other):
126    'Return a > b.  Computed by @total_ordering from (not a <= b).'
127    op_result = type(self).__le__(self, other)
128    if op_result is NotImplemented:
129        return op_result
130    return not op_result
131
132def _lt_from_gt(self, other):
133    'Return a < b.  Computed by @total_ordering from (not a > b) and (a != b).'
134    op_result = type(self).__gt__(self, other)
135    if op_result is NotImplemented:
136        return op_result
137    return not op_result and self != other
138
139def _ge_from_gt(self, other):
140    'Return a >= b.  Computed by @total_ordering from (a > b) or (a == b).'
141    op_result = type(self).__gt__(self, other)
142    if op_result is NotImplemented:
143        return op_result
144    return op_result or self == other
145
146def _le_from_gt(self, other):
147    'Return a <= b.  Computed by @total_ordering from (not a > b).'
148    op_result = type(self).__gt__(self, other)
149    if op_result is NotImplemented:
150        return op_result
151    return not op_result
152
153def _le_from_ge(self, other):
154    'Return a <= b.  Computed by @total_ordering from (not a >= b) or (a == b).'
155    op_result = type(self).__ge__(self, other)
156    if op_result is NotImplemented:
157        return op_result
158    return not op_result or self == other
159
160def _gt_from_ge(self, other):
161    'Return a > b.  Computed by @total_ordering from (a >= b) and (a != b).'
162    op_result = type(self).__ge__(self, other)
163    if op_result is NotImplemented:
164        return op_result
165    return op_result and self != other
166
167def _lt_from_ge(self, other):
168    'Return a < b.  Computed by @total_ordering from (not a >= b).'
169    op_result = type(self).__ge__(self, other)
170    if op_result is NotImplemented:
171        return op_result
172    return not op_result
173
# For each "root" comparison a class may define, the (name, function) pairs
# that @total_ordering can derive from it.  Order within each list matters:
# it determines which derived method wins if several roots are missing.
_convert = {
    '__lt__': [('__gt__', _gt_from_lt),
               ('__le__', _le_from_lt),
               ('__ge__', _ge_from_lt)],
    '__le__': [('__ge__', _ge_from_le),
               ('__lt__', _lt_from_le),
               ('__gt__', _gt_from_le)],
    '__gt__': [('__lt__', _lt_from_gt),
               ('__ge__', _ge_from_gt),
               ('__le__', _le_from_gt)],
    '__ge__': [('__le__', _le_from_ge),
               ('__gt__', _gt_from_ge),
               ('__lt__', _lt_from_ge)]
}
188
def total_ordering(cls):
    """Class decorator that fills in missing ordering methods"""
    # Operations the user actually defined (not those inherited from object).
    defined = {
        op for op in _convert
        if getattr(cls, op, None) is not getattr(object, op, None)
    }
    if not defined:
        raise ValueError('must define at least one ordering operation: < > <= >=')
    root = max(defined)       # prefer __lt__ to __le__ to __gt__ to __ge__
    # Install the derived comparisons for every operator the class lacks.
    for name, func in _convert[root]:
        if name not in defined:
            func.__name__ = name
            setattr(cls, name, func)
    return cls
201
202
203################################################################################
204### cmp_to_key() function converter
205################################################################################
206
def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function"""
    class K(object):
        # Wraps one value; every rich comparison delegates to the user's
        # three-way cmp function over the wrapped values.
        __slots__ = ['obj']
        def __init__(self, obj):
            self.obj = obj
        def __eq__(self, rhs):
            return mycmp(self.obj, rhs.obj) == 0
        def __lt__(self, rhs):
            return mycmp(self.obj, rhs.obj) < 0
        def __le__(self, rhs):
            return mycmp(self.obj, rhs.obj) <= 0
        def __gt__(self, rhs):
            return mycmp(self.obj, rhs.obj) > 0
        def __ge__(self, rhs):
            return mycmp(self.obj, rhs.obj) >= 0
        # Defining __eq__ without __hash__ would inherit object's hash;
        # explicitly make K unhashable instead.
        __hash__ = None
    return K
225
226try:
227    from _functools import cmp_to_key
228except ImportError:
229    pass
230
231
232################################################################################
233### reduce() sequence to a single item
234################################################################################
235
# Sentinel distinguishing "no initial value supplied" from initial=None.
_initial_missing = object()

def reduce(function, sequence, initial=_initial_missing):
    """
    reduce(function, iterable[, initial], /) -> value

    Apply a function of two arguments cumulatively to the items of an iterable, from left to right.

    This effectively reduces the iterable to a single value.  If initial is present,
    it is placed before the items of the iterable in the calculation, and serves as
    a default when the iterable is empty.

    For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
    calculates ((((1 + 2) + 3) + 4) + 5).
    """

    iterator = iter(sequence)

    # Seed the accumulator with the explicit initial value, or with the
    # first item when none was given.
    if initial is not _initial_missing:
        accumulator = initial
    else:
        try:
            accumulator = next(iterator)
        except StopIteration:
            raise TypeError(
                "reduce() of empty iterable with no initial value") from None

    for item in iterator:
        accumulator = function(accumulator, item)

    return accumulator
267
268try:
269    from _functools import reduce
270except ImportError:
271    pass
272
273
274################################################################################
275### partial() argument application
276################################################################################
277
# Purely functional, no descriptor behaviour
class partial:
    """New function with partial application of the given arguments
    and keywords.
    """

    # __dict__ is kept (despite __slots__) so instances accept arbitrary
    # attributes; __weakref__ allows weak references to partial objects.
    __slots__ = "func", "args", "keywords", "__dict__", "__weakref__"

    def __new__(cls, func, /, *args, **keywords):
        if not callable(func):
            raise TypeError("the first argument must be callable")

        if isinstance(func, partial):
            # Flatten nested partials so calling adds only one frame and
            # the stored args/keywords reflect the combined application.
            args = func.args + args
            keywords = {**func.keywords, **keywords}
            func = func.func

        self = super(partial, cls).__new__(cls)

        self.func = func
        self.args = args
        self.keywords = keywords
        return self

    def __call__(self, /, *args, **keywords):
        # Call-time keywords override the stored ones; call-time positionals
        # are appended after the stored ones.
        keywords = {**self.keywords, **keywords}
        return self.func(*self.args, *args, **keywords)

    @recursive_repr()
    def __repr__(self):
        # e.g. functools.partial(<func>, 1, x=2); recursive_repr guards
        # against self-referential args.
        cls = type(self)
        qualname = cls.__qualname__
        module = cls.__module__
        args = [repr(self.func)]
        args.extend(repr(x) for x in self.args)
        args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items())
        return f"{module}.{qualname}({', '.join(args)})"

    def __get__(self, obj, objtype=None):
        # Accessed through an instance: warn that partial will become a
        # method descriptor in a future release, but keep the old
        # plain-attribute behaviour for now.
        if obj is None:
            return self
        import warnings
        warnings.warn('functools.partial will be a method descriptor in '
                      'future Python versions; wrap it in staticmethod() '
                      'if you want to preserve the old behavior',
                      FutureWarning, 2)
        return self

    def __reduce__(self):
        # Pickle as (class, (func,), state); empty keywords/dict are stored
        # as None and rebuilt in __setstate__.
        return type(self), (self.func,), (self.func, self.args,
               self.keywords or None, self.__dict__ or None)

    def __setstate__(self, state):
        # Validate the 4-tuple produced by __reduce__ before trusting it;
        # pickled data may come from anywhere.
        if not isinstance(state, tuple):
            raise TypeError("argument to __setstate__ must be a tuple")
        if len(state) != 4:
            raise TypeError(f"expected 4 items in state, got {len(state)}")
        func, args, kwds, namespace = state
        if (not callable(func) or not isinstance(args, tuple) or
           (kwds is not None and not isinstance(kwds, dict)) or
           (namespace is not None and not isinstance(namespace, dict))):
            raise TypeError("invalid partial state")

        args = tuple(args) # just in case it's a subclass
        if kwds is None:
            kwds = {}
        elif type(kwds) is not dict: # XXX does it need to be *exactly* dict?
            kwds = dict(kwds)
        if namespace is None:
            namespace = {}

        self.__dict__ = namespace
        self.func = func
        self.args = args
        self.keywords = kwds
353
354try:
355    from _functools import partial
356except ImportError:
357    pass
358
359# Descriptor version
class partialmethod(object):
    """Method descriptor with partial application of the given arguments
    and keywords.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func, /, *args, **keywords):
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError("{!r} is not callable or a descriptor"
                                 .format(func))

        # func could be a descriptor like classmethod which isn't callable,
        # so we can't inherit from partial (it verifies func is callable)
        if isinstance(func, partialmethod):
            # flattening is mandatory in order to place cls/self before all
            # other arguments
            # it's also more efficient since only one function will be called
            self.func = func.func
            self.args = func.args + args
            self.keywords = {**func.keywords, **keywords}
        else:
            self.func = func
            self.args = args
            self.keywords = keywords

    def __repr__(self):
        # Mirrors partial.__repr__: class path, then func, args, keywords.
        cls = type(self)
        module = cls.__module__
        qualname = cls.__qualname__
        args = [repr(self.func)]
        args.extend(map(repr, self.args))
        args.extend(f"{k}={v!r}" for k, v in self.keywords.items())
        return f"{module}.{qualname}({', '.join(args)})"

    def _make_unbound_method(self):
        # Fallback path used when func is a plain callable (no useful
        # __get__): build a function that inserts cls/self first, then the
        # stored arguments, then the call-time ones.
        def _method(cls_or_self, /, *args, **keywords):
            keywords = {**self.keywords, **keywords}
            return self.func(cls_or_self, *self.args, *args, **keywords)
        _method.__isabstractmethod__ = self.__isabstractmethod__
        # Back-reference so introspection tools can find the partialmethod.
        _method.__partialmethod__ = self
        return _method

    def __get__(self, obj, cls=None):
        # Prefer delegating to the wrapped object's own descriptor protocol
        # (classmethod, staticmethod, plain function...).  partial is
        # excluded: its __get__ just returns self and is not a real binding.
        get = getattr(self.func, "__get__", None)
        result = None
        if get is not None and not isinstance(self.func, partial):
            new_func = get(obj, cls)
            if new_func is not self.func:
                # Assume __get__ returning something new indicates the
                # creation of an appropriate callable
                result = partial(new_func, *self.args, **self.keywords)
                try:
                    result.__self__ = new_func.__self__
                except AttributeError:
                    pass
        if result is None:
            # If the underlying descriptor didn't do anything, treat this
            # like an instance method
            result = self._make_unbound_method().__get__(obj, cls)
        return result

    @property
    def __isabstractmethod__(self):
        # Propagate abstractness from the wrapped callable.
        return getattr(self.func, "__isabstractmethod__", False)

    # Support partialmethod[...] in annotations.
    __class_getitem__ = classmethod(GenericAlias)
428
429
430# Helper functions
431
432def _unwrap_partial(func):
433    while isinstance(func, partial):
434        func = func.func
435    return func
436
437def _unwrap_partialmethod(func):
438    prev = None
439    while func is not prev:
440        prev = func
441        while isinstance(getattr(func, "__partialmethod__", None), partialmethod):
442            func = func.__partialmethod__
443        while isinstance(func, partialmethod):
444            func = getattr(func, 'func')
445        func = _unwrap_partial(func)
446    return func
447
448################################################################################
449### LRU Cache function decorator
450################################################################################
451
# Statistics tuple returned by the cache_info() method of lru_cache wrappers.
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
453
454class _HashedSeq(list):
455    """ This class guarantees that hash() will be called no more than once
456        per element.  This is important because the lru_cache() will hash
457        the key multiple times on a cache miss.
458
459    """
460
461    __slots__ = 'hashvalue'
462
463    def __init__(self, tup, hash=hash):
464        self[:] = tup
465        self.hashvalue = hash(tup)
466
467    def __hash__(self):
468        return self.hashvalue
469
470def _make_key(args, kwds, typed,
471             kwd_mark = (object(),),
472             fasttypes = {int, str},
473             tuple=tuple, type=type, len=len):
474    """Make a cache key from optionally typed positional and keyword arguments
475
476    The key is constructed in a way that is flat as possible rather than
477    as a nested structure that would take more memory.
478
479    If there is only a single argument and its data type is known to cache
480    its hash value, then that argument is returned without a wrapper.  This
481    saves space and improves lookup speed.
482
483    """
484    # All of code below relies on kwds preserving the order input by the user.
485    # Formerly, we sorted() the kwds before looping.  The new way is *much*
486    # faster; however, it means that f(x=1, y=2) will now be treated as a
487    # distinct call from f(y=2, x=1) which will be cached separately.
488    key = args
489    if kwds:
490        key += kwd_mark
491        for item in kwds.items():
492            key += item
493    if typed:
494        key += tuple(type(v) for v in args)
495        if kwds:
496            key += tuple(type(v) for v in kwds.values())
497    elif len(key) == 1 and type(key[0]) in fasttypes:
498        return key[0]
499    return _HashedSeq(key)
500
def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(decimal.Decimal("3.0")) and f(3.0) will be treated as
    distinct calls with distinct results. Some types such as str and int may
    be cached separately even when typed is false.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See:  https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)

    """

    # Users should only access the lru_cache through its public API:
    #       cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    if isinstance(maxsize, int):
        if maxsize < 0:
            # Negative maxsize is treated as 0
            maxsize = 0
    elif callable(maxsize) and isinstance(typed, bool):
        # Bare "@lru_cache" usage: the decorated function arrived through
        # the maxsize slot.  Cache it with the default size.
        func, maxsize = maxsize, 128
        wrapper = _lru_cache_wrapper(func, maxsize, typed, _CacheInfo)
        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
        return update_wrapper(wrapper, func)
    elif maxsize is not None:
        raise TypeError(
            'Expected first argument to be an integer, a callable, or None')

    def decorating_function(user_function):
        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
        return update_wrapper(wrapper, user_function)

    return decorating_function
547
def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
    """Build the caching wrapper around *user_function*.

    One of three strategies is chosen from *maxsize*: no caching at all
    (maxsize == 0), unbounded caching (maxsize is None), or a size-limited
    LRU cache whose recency order is tracked with a circular doubly linked
    list.  Returns the wrapper with cache_info/cache_clear attached.
    """
    # Constants shared by all lru cache instances:
    sentinel = object()          # unique object used to signal cache misses
    make_key = _make_key         # build a key from the function arguments
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    cache = {}
    hits = misses = 0
    full = False
    cache_get = cache.get    # bound method to lookup a key or return None
    cache_len = cache.__len__  # get cache size without calling len()
    lock = RLock()           # because linkedlist updates aren't threadsafe
    root = []                # root of the circular doubly linked list
    root[:] = [root, root, None, None]     # initialize by pointing to self

    if maxsize == 0:

        def wrapper(*args, **kwds):
            # No caching -- just a statistics update
            nonlocal misses
            misses += 1
            result = user_function(*args, **kwds)
            return result

    elif maxsize is None:

        def wrapper(*args, **kwds):
            # Simple caching without ordering or size limit
            nonlocal hits, misses
            key = make_key(args, kwds, typed)
            result = cache_get(key, sentinel)
            if result is not sentinel:
                hits += 1
                return result
            misses += 1
            result = user_function(*args, **kwds)
            cache[key] = result
            return result

    else:

        def wrapper(*args, **kwds):
            # Size limited caching that tracks accesses by recency
            nonlocal root, hits, misses, full
            key = make_key(args, kwds, typed)
            with lock:
                link = cache_get(key)
                if link is not None:
                    # Move the link to the front of the circular queue
                    link_prev, link_next, _key, result = link
                    link_prev[NEXT] = link_next
                    link_next[PREV] = link_prev
                    last = root[PREV]
                    last[NEXT] = root[PREV] = link
                    link[PREV] = last
                    link[NEXT] = root
                    hits += 1
                    return result
                misses += 1
            # The user function runs outside the lock: it may be slow, and
            # it may reentrantly call this same wrapper.
            result = user_function(*args, **kwds)
            with lock:
                if key in cache:
                    # Getting here means that this same key was added to the
                    # cache while the lock was released.  Since the link
                    # update is already done, we need only return the
                    # computed result and update the count of misses.
                    pass
                elif full:
                    # Use the old root to store the new key and result.
                    oldroot = root
                    oldroot[KEY] = key
                    oldroot[RESULT] = result
                    # Empty the oldest link and make it the new root.
                    # Keep a reference to the old key and old result to
                    # prevent their ref counts from going to zero during the
                    # update. That will prevent potentially arbitrary object
                    # clean-up code (i.e. __del__) from running while we're
                    # still adjusting the links.
                    root = oldroot[NEXT]
                    oldkey = root[KEY]
                    oldresult = root[RESULT]
                    root[KEY] = root[RESULT] = None
                    # Now update the cache dictionary.
                    del cache[oldkey]
                    # Save the potentially reentrant cache[key] assignment
                    # for last, after the root and links have been put in
                    # a consistent state.
                    cache[key] = oldroot
                else:
                    # Put result in a new link at the front of the queue.
                    last = root[PREV]
                    link = [last, root, key, result]
                    last[NEXT] = root[PREV] = cache[key] = link
                    # Use the cache_len bound method instead of the len() function
                    # which could potentially be wrapped in an lru_cache itself.
                    full = (cache_len() >= maxsize)
            return result

    def cache_info():
        """Report cache statistics"""
        with lock:
            return _CacheInfo(hits, misses, maxsize, cache_len())

    def cache_clear():
        """Clear the cache and cache statistics"""
        nonlocal hits, misses, full
        with lock:
            cache.clear()
            root[:] = [root, root, None, None]
            hits = misses = 0
            full = False

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return wrapper
663
664try:
665    from _functools import _lru_cache_wrapper
666except ImportError:
667    pass
668
669
670################################################################################
671### cache -- simplified access to the infinity cache
672################################################################################
673
def cache(user_function, /):
    'Simple lightweight unbounded cache.  Sometimes called "memoize".'
    # Equivalent to @lru_cache(maxsize=None): full caching, no eviction.
    unbounded = lru_cache(maxsize=None)
    return unbounded(user_function)
677
678
679################################################################################
680### singledispatch() - single-dispatch generic function decorator
681################################################################################
682
683def _c3_merge(sequences):
684    """Merges MROs in *sequences* to a single MRO using the C3 algorithm.
685
686    Adapted from https://docs.python.org/3/howto/mro.html.
687
688    """
689    result = []
690    while True:
691        sequences = [s for s in sequences if s]   # purge empty sequences
692        if not sequences:
693            return result
694        for s1 in sequences:   # find merge candidates among seq heads
695            candidate = s1[0]
696            for s2 in sequences:
697                if candidate in s2[1:]:
698                    candidate = None
699                    break      # reject the current head, it appears later
700            else:
701                break
702        if candidate is None:
703            raise RuntimeError("Inconsistent hierarchy")
704        result.append(candidate)
705        # remove the chosen candidate
706        for seq in sequences:
707            if seq[0] == candidate:
708                del seq[0]
709
def _c3_mro(cls, abcs=None):
    """Computes the method resolution order using extended C3 linearization.

    If no *abcs* are given, the algorithm works exactly like the built-in C3
    linearization used for method resolution.

    If given, *abcs* is a list of abstract base classes that should be inserted
    into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
    result. The algorithm inserts ABCs where their functionality is introduced,
    i.e. issubclass(cls, abc) returns True for the class itself but returns
    False for all its direct base classes. Implicit ABCs for a given class
    (either registered or inferred from the presence of a special method like
    __len__) are inserted directly after the last ABC explicitly listed in the
    MRO of said class. If two implicit ABCs end up next to each other in the
    resulting MRO, their ordering depends on the order of types in *abcs*.

    """
    # Find the index (from the left) just past the last explicit ABC among
    # the direct bases; bases before it are "explicit", the rest "other".
    for i, base in enumerate(reversed(cls.__bases__)):
        if hasattr(base, '__abstractmethods__'):
            boundary = len(cls.__bases__) - i
            break   # Bases up to the last explicit ABC are considered first.
    else:
        boundary = 0
    abcs = list(abcs) if abcs else []
    explicit_bases = list(cls.__bases__[:boundary])
    abstract_bases = []
    other_bases = list(cls.__bases__[boundary:])
    for base in abcs:
        if issubclass(cls, base) and not any(
                issubclass(b, base) for b in cls.__bases__
            ):
            # If *cls* is the class that introduces behaviour described by
            # an ABC *base*, insert said ABC to its MRO.
            abstract_bases.append(base)
    # Bases adopted as "abstract" here must not also be propagated down the
    # recursive calls below.
    for base in abstract_bases:
        abcs.remove(base)
    # Recursively linearize each group of bases, then merge per C3:
    # the class itself, the linearizations, then the base lists in order.
    explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
    abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
    other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
    return _c3_merge(
        [[cls]] +
        explicit_c3_mros + abstract_c3_mros + other_c3_mros +
        [explicit_bases] + [abstract_bases] + [other_bases]
    )
754
def _compose_mro(cls, types):
    """Calculates the method resolution order for a given class *cls*.

    Includes relevant abstract base classes (with their respective bases) from
    the *types* iterable. Uses a modified C3 linearization algorithm.

    """
    bases = set(cls.__mro__)
    # Remove entries which are already present in the __mro__ or unrelated.
    def is_related(typ):
        return (typ not in bases and hasattr(typ, '__mro__')
                                 and not isinstance(typ, GenericAlias)
                                 and issubclass(cls, typ))
    types = [n for n in types if is_related(n)]
    # Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway).
    def is_strict_base(typ):
        for other in types:
            if typ != other and typ in other.__mro__:
                return True
        return False
    types = [n for n in types if not is_strict_base(n)]
    # Subclasses of the ABCs in *types* which are also implemented by
    # *cls* can be used to stabilize ABC ordering.
    type_set = set(types)
    mro = []
    for typ in types:
        found = []
        for sub in typ.__subclasses__():
            if sub not in bases and issubclass(cls, sub):
                # Record the candidate ABCs in the order this subclass's
                # own MRO puts them.
                found.append([s for s in sub.__mro__ if s in type_set])
        if not found:
            mro.append(typ)
            continue
        # Favor subclasses with the biggest number of useful bases
        found.sort(key=len, reverse=True)
        for sub in found:
            for subcls in sub:
                if subcls not in mro:
                    mro.append(subcls)
    return _c3_mro(cls, abcs=mro)
796
def _find_impl(cls, registry):
    """Returns the best matching implementation from *registry* for type *cls*.

    Where there is no registered implementation for a specific type, its method
    resolution order is used to find a more generic implementation.

    Note: if *registry* does not contain an implementation for the base
    *object* type, this function may return None.

    """
    mro = _compose_mro(cls, registry.keys())
    match = None
    for typ in mro:
        if match is None:
            # Still searching: remember the first registered type we hit.
            if typ in registry:
                match = typ
            continue
        # *match* was set on the previous iteration; *typ* is the very next
        # MRO entry.  If it is an unrelated implicit ABC that matches just
        # as well, refuse the temptation to guess.
        if (typ in registry
                and typ not in cls.__mro__
                and match not in cls.__mro__
                and not issubclass(match, typ)):
            raise RuntimeError("Ambiguous dispatch: {} or {}".format(
                match, typ))
        break
    return registry.get(match)
822
def singledispatch(func):
    """Single-dispatch generic function decorator.

    Transforms a function into a generic function, which can have different
    behaviours depending upon the type of its first argument. The decorated
    function acts as the default implementation, and additional
    implementations can be registered using the register() attribute of the
    generic function.
    """
    # There are many programs that use functools without singledispatch, so we
    # trade-off making singledispatch marginally slower for the benefit of
    # making start-up of such applications slightly faster.
    import types, weakref

    # cls -> implementation; seeded below with *func* as the fallback for
    # ``object`` so dispatch always finds something.
    registry = {}
    # Memoizes dispatch() results; weak keys so classes can be collected.
    dispatch_cache = weakref.WeakKeyDictionary()
    # Non-None only once an ABC has been registered; used to notice when the
    # ABC machinery invalidates previously cached virtual-subclass checks.
    cache_token = None

    def dispatch(cls):
        """generic_func.dispatch(cls) -> <function implementation>

        Runs the dispatch algorithm to return the best available implementation
        for the given *cls* registered on *generic_func*.

        """
        nonlocal cache_token
        if cache_token is not None:
            # An ABC is registered: drop the cache whenever the abc module's
            # cache token has changed (e.g. after SomeABC.register(...)).
            current_token = get_cache_token()
            if cache_token != current_token:
                dispatch_cache.clear()
                cache_token = current_token
        try:
            impl = dispatch_cache[cls]
        except KeyError:
            try:
                # Exact match first; fall back to MRO-based resolution.
                impl = registry[cls]
            except KeyError:
                impl = _find_impl(cls, registry)
            dispatch_cache[cls] = impl
        return impl

    def _is_union_type(cls):
        # True for both typing.Union[...] and X | Y (types.UnionType) forms.
        from typing import get_origin, Union
        return get_origin(cls) in {Union, types.UnionType}

    def _is_valid_dispatch_type(cls):
        # A dispatch target is either a plain class or a union whose
        # members are all plain classes.
        if isinstance(cls, type):
            return True
        from typing import get_args
        return (_is_union_type(cls) and
                all(isinstance(arg, type) for arg in get_args(cls)))

    def register(cls, func=None):
        """generic_func.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_func*.

        """
        nonlocal cache_token
        if _is_valid_dispatch_type(cls):
            if func is None:
                # Decorator usage with an explicit type:
                # @generic.register(SomeClass)
                return lambda f: register(cls, f)
        else:
            if func is not None:
                raise TypeError(
                    f"Invalid first argument to `register()`. "
                    f"{cls!r} is not a class or union type."
                )
            # Bare @generic.register usage: infer the dispatch type from the
            # annotation on the decorated function's first parameter.
            ann = getattr(cls, '__annotations__', {})
            if not ann:
                raise TypeError(
                    f"Invalid first argument to `register()`: {cls!r}. "
                    f"Use either `@register(some_class)` or plain `@register` "
                    f"on an annotated function."
                )
            func = cls

            # only import typing if annotation parsing is necessary
            from typing import get_type_hints
            argname, cls = next(iter(get_type_hints(func).items()))
            if not _is_valid_dispatch_type(cls):
                if _is_union_type(cls):
                    raise TypeError(
                        f"Invalid annotation for {argname!r}. "
                        f"{cls!r} not all arguments are classes."
                    )
                else:
                    raise TypeError(
                        f"Invalid annotation for {argname!r}. "
                        f"{cls!r} is not a class."
                    )

        if _is_union_type(cls):
            from typing import get_args

            # A union registers each of its member classes individually.
            for arg in get_args(cls):
                registry[arg] = func
        else:
            registry[cls] = func
        if cache_token is None and hasattr(cls, '__abstractmethods__'):
            # First ABC registered: start tracking the abc cache token so
            # dispatch() can invalidate its cache when relationships change.
            cache_token = get_cache_token()
        dispatch_cache.clear()
        return func

    def wrapper(*args, **kw):
        if not args:
            raise TypeError(f'{funcname} requires at least '
                            '1 positional argument')
        # Dispatch on the runtime class of the first positional argument.
        return dispatch(args[0].__class__)(*args, **kw)

    funcname = getattr(func, '__name__', 'singledispatch function')
    registry[object] = func
    wrapper.register = register
    wrapper.dispatch = dispatch
    # Expose a read-only public view of the registry.
    wrapper.registry = types.MappingProxyType(registry)
    wrapper._clear_cache = dispatch_cache.clear
    update_wrapper(wrapper, func)
    return wrapper
941
942
943# Descriptor version
class singledispatchmethod:
    """Single-dispatch generic method descriptor.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func):
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError(f"{func!r} is not callable or a descriptor")

        # The underlying generic function performs the per-type dispatch;
        # *func* is kept so the bound wrapper can copy its metadata.
        self.dispatcher = singledispatch(func)
        self.func = func

        import weakref # see comment in singledispatch function
        # instance -> bound _method wrapper, so repeated attribute access
        # does not rebuild the wrapper; weak keys let instances be collected.
        self._method_cache = weakref.WeakKeyDictionary()

    def register(self, cls, method=None):
        """generic_method.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_method*.
        """
        return self.dispatcher.register(cls, func=method)

    def __get__(self, obj, cls=None):
        # Fast path: reuse a previously built wrapper for this instance.
        if self._method_cache is not None:
            try:
                _method = self._method_cache[obj]
            except TypeError:
                # *obj* cannot be used as a weak-dict key (presumably not
                # weak-referenceable); give up on caching permanently.
                self._method_cache = None
            except KeyError:
                pass
            else:
                return _method

        dispatch = self.dispatcher.dispatch
        funcname = getattr(self.func, '__name__', 'singledispatchmethod method')
        def _method(*args, **kwargs):
            if not args:
                raise TypeError(f'{funcname} requires at least '
                                '1 positional argument')
            # Dispatch on the first argument's class, then bind the chosen
            # implementation to *obj* before calling it.
            return dispatch(args[0].__class__).__get__(obj, cls)(*args, **kwargs)

        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method.register = self.register
        update_wrapper(_method, self.func)

        if self._method_cache is not None:
            self._method_cache[obj] = _method

        return _method

    @property
    def __isabstractmethod__(self):
        # Mirror the wrapped function's abstractness so ABCs treat the
        # descriptor correctly.
        return getattr(self.func, '__isabstractmethod__', False)
999
1000
1001################################################################################
1002### cached_property() - property result cached as instance attribute
1003################################################################################
1004
# Sentinel distinguishing "no cached value yet" from a cached None.
_NOT_FOUND = object()

class cached_property:
    """Descriptor that caches the wrapped method's result on the instance.

    The first attribute access calls *func* and stores the result in the
    instance ``__dict__`` under the descriptor's name; subsequent lookups
    return the stored value without calling *func* again.
    """

    def __init__(self, func):
        self.func = func
        self.attrname = None
        self.__doc__ = func.__doc__
        self.__module__ = func.__module__

    def __set_name__(self, owner, name):
        # Record the attribute name exactly once; binding the same
        # descriptor under a second, different name is an error.
        if self.attrname is None:
            self.attrname = name
            return
        if name == self.attrname:
            return
        raise TypeError(
            "Cannot assign the same cached_property to two different names "
            f"({self.attrname!r} and {name!r})."
        )

    def __get__(self, instance, owner=None):
        # Class-level access returns the descriptor object itself.
        if instance is None:
            return self
        name = self.attrname
        if name is None:
            raise TypeError(
                "Cannot use cached_property instance without calling __set_name__ on it.")
        try:
            cache = instance.__dict__
        except AttributeError:  # not all objects have __dict__ (e.g. class defines slots)
            raise TypeError(
                f"No '__dict__' attribute on {type(instance).__name__!r} "
                f"instance to cache {self.attrname!r} property."
            ) from None
        val = cache.get(name, _NOT_FOUND)
        if val is not _NOT_FOUND:
            return val
        # Cache miss: compute once and store on the instance.
        val = self.func(instance)
        try:
            cache[name] = val
        except TypeError:
            raise TypeError(
                f"The '__dict__' attribute on {type(instance).__name__!r} instance "
                f"does not support item assignment for caching {self.attrname!r} property."
            ) from None
        return val

    __class_getitem__ = classmethod(GenericAlias)
1051