• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1"""functools.py - Tools for working with functions and callable objects
2"""
3# Python module wrapper for _functools C module
4# to allow utilities written in Python to be added
5# to the functools module.
6# Written by Nick Coghlan <ncoghlan at gmail.com>,
7# Raymond Hettinger <python at rcn.com>,
8# and Łukasz Langa <lukasz at langa.pl>.
9#   Copyright (C) 2006-2013 Python Software Foundation.
10# See C source code for _functools credits/copyright
11
12__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
13           'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial',
14           'partialmethod', 'singledispatch', 'singledispatchmethod']
15
16from abc import get_cache_token
17from collections import namedtuple
18# import types, weakref  # Deferred to single_dispatch()
19from reprlib import recursive_repr
20from _thread import RLock
21
22
23################################################################################
24### update_wrapper() and wraps() decorator
25################################################################################
26
27# update_wrapper() and wraps() are tools to help write
28# wrapper functions that can handle naive introspection
29
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
                       '__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Update a wrapper function to look like the wrapped function

       wrapper is the function to be updated
       wrapped is the original function
       assigned is a tuple naming the attributes assigned directly
       from the wrapped function to the wrapper function (defaults to
       functools.WRAPPER_ASSIGNMENTS)
       updated is a tuple naming the attributes of the wrapper that
       are updated with the corresponding attribute from the wrapped
       function (defaults to functools.WRAPPER_UPDATES)
    """
    # Copy each named attribute across, skipping any the wrapped object
    # lacks (e.g. a partial object has no __name__).
    for name in assigned:
        try:
            attr_value = getattr(wrapped, name)
        except AttributeError:
            continue
        setattr(wrapper, name, attr_value)
    # Merge mapping attributes (normally just __dict__) rather than
    # replacing them outright.
    for name in updated:
        getattr(wrapper, name).update(getattr(wrapped, name, {}))
    # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
    # from the wrapped function when updating __dict__
    wrapper.__wrapped__ = wrapped
    # Return the wrapper so this can be used as a decorator via partial()
    return wrapper
62
def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Decorator factory to apply update_wrapper() to a wrapper function

       Returns a decorator that invokes update_wrapper() with the decorated
       function as the wrapper argument and the arguments to wraps() as the
       remaining arguments. Default arguments are as for update_wrapper().
       This is a convenience function to simplify applying partial() to
       update_wrapper().
    """
    # The returned object is a partial: calling it with the wrapper function
    # runs update_wrapper(wrapper, wrapped, assigned, updated).
    return partial(update_wrapper, wrapped=wrapped,
                   assigned=assigned, updated=updated)
76
77
78################################################################################
79### total_ordering class decorator
80################################################################################
81
82# The total ordering functions all invoke the root magic method directly
83# rather than using the corresponding operator.  This avoids possible
84# infinite recursion that could occur when the operator dispatch logic
85# detects a NotImplemented result and then calls a reflected method.
86
87def _gt_from_lt(self, other, NotImplemented=NotImplemented):
88    'Return a > b.  Computed by @total_ordering from (not a < b) and (a != b).'
89    op_result = self.__lt__(other)
90    if op_result is NotImplemented:
91        return op_result
92    return not op_result and self != other
93
94def _le_from_lt(self, other, NotImplemented=NotImplemented):
95    'Return a <= b.  Computed by @total_ordering from (a < b) or (a == b).'
96    op_result = self.__lt__(other)
97    return op_result or self == other
98
99def _ge_from_lt(self, other, NotImplemented=NotImplemented):
100    'Return a >= b.  Computed by @total_ordering from (not a < b).'
101    op_result = self.__lt__(other)
102    if op_result is NotImplemented:
103        return op_result
104    return not op_result
105
106def _ge_from_le(self, other, NotImplemented=NotImplemented):
107    'Return a >= b.  Computed by @total_ordering from (not a <= b) or (a == b).'
108    op_result = self.__le__(other)
109    if op_result is NotImplemented:
110        return op_result
111    return not op_result or self == other
112
113def _lt_from_le(self, other, NotImplemented=NotImplemented):
114    'Return a < b.  Computed by @total_ordering from (a <= b) and (a != b).'
115    op_result = self.__le__(other)
116    if op_result is NotImplemented:
117        return op_result
118    return op_result and self != other
119
120def _gt_from_le(self, other, NotImplemented=NotImplemented):
121    'Return a > b.  Computed by @total_ordering from (not a <= b).'
122    op_result = self.__le__(other)
123    if op_result is NotImplemented:
124        return op_result
125    return not op_result
126
127def _lt_from_gt(self, other, NotImplemented=NotImplemented):
128    'Return a < b.  Computed by @total_ordering from (not a > b) and (a != b).'
129    op_result = self.__gt__(other)
130    if op_result is NotImplemented:
131        return op_result
132    return not op_result and self != other
133
134def _ge_from_gt(self, other, NotImplemented=NotImplemented):
135    'Return a >= b.  Computed by @total_ordering from (a > b) or (a == b).'
136    op_result = self.__gt__(other)
137    return op_result or self == other
138
139def _le_from_gt(self, other, NotImplemented=NotImplemented):
140    'Return a <= b.  Computed by @total_ordering from (not a > b).'
141    op_result = self.__gt__(other)
142    if op_result is NotImplemented:
143        return op_result
144    return not op_result
145
146def _le_from_ge(self, other, NotImplemented=NotImplemented):
147    'Return a <= b.  Computed by @total_ordering from (not a >= b) or (a == b).'
148    op_result = self.__ge__(other)
149    if op_result is NotImplemented:
150        return op_result
151    return not op_result or self == other
152
153def _gt_from_ge(self, other, NotImplemented=NotImplemented):
154    'Return a > b.  Computed by @total_ordering from (a >= b) and (a != b).'
155    op_result = self.__ge__(other)
156    if op_result is NotImplemented:
157        return op_result
158    return op_result and self != other
159
160def _lt_from_ge(self, other, NotImplemented=NotImplemented):
161    'Return a < b.  Computed by @total_ordering from (not a >= b).'
162    op_result = self.__ge__(other)
163    if op_result is NotImplemented:
164        return op_result
165    return not op_result
166
# Mapping from each root comparison dunder to the (name, function) pairs
# that total_ordering() installs for the comparisons the class is missing.
_convert = {
    '__lt__': [('__gt__', _gt_from_lt),
               ('__le__', _le_from_lt),
               ('__ge__', _ge_from_lt)],
    '__le__': [('__ge__', _ge_from_le),
               ('__lt__', _lt_from_le),
               ('__gt__', _gt_from_le)],
    '__gt__': [('__lt__', _lt_from_gt),
               ('__ge__', _ge_from_gt),
               ('__le__', _le_from_gt)],
    '__ge__': [('__le__', _le_from_ge),
               ('__gt__', _gt_from_ge),
               ('__lt__', _lt_from_ge)]
}
181
def total_ordering(cls):
    """Class decorator that fills in missing ordering methods"""
    # Operations the class defines itself (ignore those inherited from object).
    defined = {op for op in _convert
               if getattr(cls, op, None) is not getattr(object, op, None)}
    if not defined:
        raise ValueError('must define at least one ordering operation: < > <= >=')
    # String max() prefers __lt__ to __le__ to __gt__ to __ge__.
    root = max(defined)
    for opname, opfunc in _convert[root]:
        if opname not in defined:
            opfunc.__name__ = opname
            setattr(cls, opname, opfunc)
    return cls
194
195
196################################################################################
197### cmp_to_key() function converter
198################################################################################
199
def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function"""
    class K(object):
        """Key wrapper that orders wrapped objects via mycmp."""
        __slots__ = ['obj']
        def __init__(self, obj):
            self.obj = obj
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
        __hash__ = None   # keys compare by mycmp only; keep them unhashable
    return K
218
219try:
220    from _functools import cmp_to_key
221except ImportError:
222    pass
223
224
225################################################################################
226### reduce() sequence to a single item
227################################################################################
228
# Sentinel distinguishing "no initial value supplied" from initial=None.
_initial_missing = object()

def reduce(function, sequence, initial=_initial_missing):
    """
    reduce(function, sequence[, initial]) -> value

    Apply a function of two arguments cumulatively to the items of a sequence,
    from left to right, so as to reduce the sequence to a single value.
    For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
    ((((1+2)+3)+4)+5).  If initial is present, it is placed before the items
    of the sequence in the calculation, and serves as a default when the
    sequence is empty.
    """
    iterator = iter(sequence)

    if initial is not _initial_missing:
        accumulator = initial
    else:
        # No initial value: seed the accumulator with the first item.
        try:
            accumulator = next(iterator)
        except StopIteration:
            raise TypeError("reduce() of empty sequence with no initial value") from None

    for item in iterator:
        accumulator = function(accumulator, item)

    return accumulator
257
258try:
259    from _functools import reduce
260except ImportError:
261    pass
262
263
264################################################################################
265### partial() argument application
266################################################################################
267
268# Purely functional, no descriptor behaviour
class partial:
    """New function with partial application of the given arguments
    and keywords.
    """

    # __dict__ is listed so instances can still grow attributes despite
    # __slots__; __weakref__ enables weak references to partial objects.
    __slots__ = "func", "args", "keywords", "__dict__", "__weakref__"

    def __new__(cls, func, /, *args, **keywords):
        # Construct the partial; nested partials are flattened to one level
        # so only a single call happens at call time.
        if not callable(func):
            raise TypeError("the first argument must be callable")

        # Duck-typed flattening: anything exposing .func/.args/.keywords
        # (i.e. another partial) is collapsed into this one.
        if hasattr(func, "func"):
            args = func.args + args
            keywords = {**func.keywords, **keywords}
            func = func.func

        self = super(partial, cls).__new__(cls)

        self.func = func
        self.args = args
        self.keywords = keywords
        return self

    def __call__(self, /, *args, **keywords):
        # Call-time keywords override the stored ones; call-time positional
        # arguments are appended after the stored positional arguments.
        keywords = {**self.keywords, **keywords}
        return self.func(*self.args, *args, **keywords)

    @recursive_repr()
    def __repr__(self):
        # Produces e.g. functools.partial(<func>, 1, x=2); recursive_repr
        # guards against a partial that (indirectly) contains itself.
        qualname = type(self).__qualname__
        args = [repr(self.func)]
        args.extend(repr(x) for x in self.args)
        args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items())
        if type(self).__module__ == "functools":
            return f"functools.{qualname}({', '.join(args)})"
        return f"{qualname}({', '.join(args)})"

    def __reduce__(self):
        # Pickle as (class, (func,), state-tuple); empty keywords/__dict__
        # are stored as None to keep the pickle compact.
        return type(self), (self.func,), (self.func, self.args,
               self.keywords or None, self.__dict__ or None)

    def __setstate__(self, state):
        # Restore from the 4-tuple emitted by __reduce__, validating each
        # component since pickled data may come from untrusted sources.
        if not isinstance(state, tuple):
            raise TypeError("argument to __setstate__ must be a tuple")
        if len(state) != 4:
            raise TypeError(f"expected 4 items in state, got {len(state)}")
        func, args, kwds, namespace = state
        if (not callable(func) or not isinstance(args, tuple) or
           (kwds is not None and not isinstance(kwds, dict)) or
           (namespace is not None and not isinstance(namespace, dict))):
            raise TypeError("invalid partial state")

        args = tuple(args) # just in case it's a subclass
        if kwds is None:
            kwds = {}
        elif type(kwds) is not dict: # XXX does it need to be *exactly* dict?
            kwds = dict(kwds)
        if namespace is None:
            namespace = {}

        self.__dict__ = namespace
        self.func = func
        self.args = args
        self.keywords = kwds
333
334try:
335    from _functools import partial
336except ImportError:
337    pass
338
339# Descriptor version
class partialmethod(object):
    """Method descriptor with partial application of the given arguments
    and keywords.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(*args, **keywords):
        # *args-only signature so that 'func' may also arrive as a keyword
        # (a deprecated calling convention handled explicitly below).
        if len(args) >= 2:
            self, func, *args = args
        elif not args:
            raise TypeError("descriptor '__init__' of partialmethod "
                            "needs an argument")
        elif 'func' in keywords:
            func = keywords.pop('func')
            self, *args = args
            import warnings
            warnings.warn("Passing 'func' as keyword argument is deprecated",
                          DeprecationWarning, stacklevel=2)
        else:
            raise TypeError("type 'partialmethod' takes at least one argument, "
                            "got %d" % (len(args)-1))
        args = tuple(args)

        # A descriptor (e.g. classmethod) need not be callable itself.
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError("{!r} is not callable or a descriptor"
                                 .format(func))

        # func could be a descriptor like classmethod which isn't callable,
        # so we can't inherit from partial (it verifies func is callable)
        if isinstance(func, partialmethod):
            # flattening is mandatory in order to place cls/self before all
            # other arguments
            # it's also more efficient since only one function will be called
            self.func = func.func
            self.args = func.args + args
            self.keywords = {**func.keywords, **keywords}
        else:
            self.func = func
            self.args = args
            self.keywords = keywords
    __init__.__text_signature__ = '($self, func, /, *args, **keywords)'

    def __repr__(self):
        # e.g. "functools.partialmethod(<func>, 1, 2, x=3)"
        args = ", ".join(map(repr, self.args))
        keywords = ", ".join("{}={!r}".format(k, v)
                                 for k, v in self.keywords.items())
        format_string = "{module}.{cls}({func}, {args}, {keywords})"
        return format_string.format(module=self.__class__.__module__,
                                    cls=self.__class__.__qualname__,
                                    func=self.func,
                                    args=args,
                                    keywords=keywords)

    def _make_unbound_method(self):
        # Fallback used when self.func does not bind via __get__: behave
        # like a plain method whose first argument is the instance/class.
        def _method(cls_or_self, /, *args, **keywords):
            keywords = {**self.keywords, **keywords}
            return self.func(cls_or_self, *self.args, *args, **keywords)
        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method._partialmethod = self
        return _method

    def __get__(self, obj, cls=None):
        # Delegate binding to the wrapped object's own __get__ when it has
        # one; otherwise fall back to instance-method semantics.
        get = getattr(self.func, "__get__", None)
        result = None
        if get is not None:
            new_func = get(obj, cls)
            if new_func is not self.func:
                # Assume __get__ returning something new indicates the
                # creation of an appropriate callable
                result = partial(new_func, *self.args, **self.keywords)
                try:
                    result.__self__ = new_func.__self__
                except AttributeError:
                    pass
        if result is None:
            # If the underlying descriptor didn't do anything, treat this
            # like an instance method
            result = self._make_unbound_method().__get__(obj, cls)
        return result

    @property
    def __isabstractmethod__(self):
        # Mirror the abstractness of the wrapped callable so ABCs treat a
        # partialmethod over an abstract method as abstract too.
        return getattr(self.func, "__isabstractmethod__", False)
425
426# Helper functions
427
428def _unwrap_partial(func):
429    while isinstance(func, partial):
430        func = func.func
431    return func
432
433################################################################################
434### LRU Cache function decorator
435################################################################################
436
# Result type of the cache_info() introspection call on lru_cache wrappers:
# hit/miss counters plus the configured maxsize and the current cache size.
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
438
439class _HashedSeq(list):
440    """ This class guarantees that hash() will be called no more than once
441        per element.  This is important because the lru_cache() will hash
442        the key multiple times on a cache miss.
443
444    """
445
446    __slots__ = 'hashvalue'
447
448    def __init__(self, tup, hash=hash):
449        self[:] = tup
450        self.hashvalue = hash(tup)
451
452    def __hash__(self):
453        return self.hashvalue
454
455def _make_key(args, kwds, typed,
456             kwd_mark = (object(),),
457             fasttypes = {int, str},
458             tuple=tuple, type=type, len=len):
459    """Make a cache key from optionally typed positional and keyword arguments
460
461    The key is constructed in a way that is flat as possible rather than
462    as a nested structure that would take more memory.
463
464    If there is only a single argument and its data type is known to cache
465    its hash value, then that argument is returned without a wrapper.  This
466    saves space and improves lookup speed.
467
468    """
469    # All of code below relies on kwds preserving the order input by the user.
470    # Formerly, we sorted() the kwds before looping.  The new way is *much*
471    # faster; however, it means that f(x=1, y=2) will now be treated as a
472    # distinct call from f(y=2, x=1) which will be cached separately.
473    key = args
474    if kwds:
475        key += kwd_mark
476        for item in kwds.items():
477            key += item
478    if typed:
479        key += tuple(type(v) for v in args)
480        if kwds:
481            key += tuple(type(v) for v in kwds.values())
482    elif len(key) == 1 and type(key[0]) in fasttypes:
483        return key[0]
484    return _HashedSeq(key)
485
def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See:  http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)

    """

    # Users should only access the lru_cache through its public API:
    #       cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    if isinstance(maxsize, int):
        if maxsize < 0:
            maxsize = 0      # negative maxsize behaves like 0 (no caching)
    elif callable(maxsize) and isinstance(typed, bool):
        # Bare usage: @lru_cache with no parentheses -- the "maxsize"
        # argument is actually the function being decorated.
        func, maxsize = maxsize, 128
        return update_wrapper(
            _lru_cache_wrapper(func, maxsize, typed, _CacheInfo), func)
    elif maxsize is not None:
        raise TypeError(
            'Expected first argument to be an integer, a callable, or None')

    def decorator(user_function):
        # Build the caching wrapper and make it look like user_function.
        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
        return update_wrapper(wrapper, user_function)

    return decorator
529
def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
    # Build and return the caching wrapper for user_function, choosing one
    # of three strategies based on maxsize: 0 (no caching), None (unbounded
    # dict cache), or a positive bound (LRU via a circular doubly linked
    # list whose nodes are 4-item lists: [PREV, NEXT, KEY, RESULT]).

    # Constants shared by all lru cache instances:
    sentinel = object()          # unique object used to signal cache misses
    make_key = _make_key         # build a key from the function arguments
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    cache = {}
    hits = misses = 0
    full = False
    cache_get = cache.get    # bound method to lookup a key or return None
    cache_len = cache.__len__  # get cache size without calling len()
    lock = RLock()           # because linkedlist updates aren't threadsafe
    root = []                # root of the circular doubly linked list
    root[:] = [root, root, None, None]     # initialize by pointing to self

    if maxsize == 0:

        def wrapper(*args, **kwds):
            # No caching -- just a statistics update
            nonlocal misses
            misses += 1
            result = user_function(*args, **kwds)
            return result

    elif maxsize is None:

        def wrapper(*args, **kwds):
            # Simple caching without ordering or size limit
            nonlocal hits, misses
            key = make_key(args, kwds, typed)
            result = cache_get(key, sentinel)
            if result is not sentinel:
                hits += 1
                return result
            misses += 1
            result = user_function(*args, **kwds)
            cache[key] = result
            return result

    else:

        def wrapper(*args, **kwds):
            # Size limited caching that tracks accesses by recency
            nonlocal root, hits, misses, full
            key = make_key(args, kwds, typed)
            with lock:
                link = cache_get(key)
                if link is not None:
                    # Move the link to the front of the circular queue
                    link_prev, link_next, _key, result = link
                    link_prev[NEXT] = link_next
                    link_next[PREV] = link_prev
                    last = root[PREV]
                    last[NEXT] = root[PREV] = link
                    link[PREV] = last
                    link[NEXT] = root
                    hits += 1
                    return result
                misses += 1
            # user_function runs with the lock released, so concurrent
            # callers with the same key may duplicate work; handled below.
            result = user_function(*args, **kwds)
            with lock:
                if key in cache:
                    # Getting here means that this same key was added to the
                    # cache while the lock was released.  Since the link
                    # update is already done, we need only return the
                    # computed result and update the count of misses.
                    pass
                elif full:
                    # Use the old root to store the new key and result.
                    oldroot = root
                    oldroot[KEY] = key
                    oldroot[RESULT] = result
                    # Empty the oldest link and make it the new root.
                    # Keep a reference to the old key and old result to
                    # prevent their ref counts from going to zero during the
                    # update. That will prevent potentially arbitrary object
                    # clean-up code (i.e. __del__) from running while we're
                    # still adjusting the links.
                    root = oldroot[NEXT]
                    oldkey = root[KEY]
                    oldresult = root[RESULT]
                    root[KEY] = root[RESULT] = None
                    # Now update the cache dictionary.
                    del cache[oldkey]
                    # Save the potentially reentrant cache[key] assignment
                    # for last, after the root and links have been put in
                    # a consistent state.
                    cache[key] = oldroot
                else:
                    # Put result in a new link at the front of the queue.
                    last = root[PREV]
                    link = [last, root, key, result]
                    last[NEXT] = root[PREV] = cache[key] = link
                    # Use the cache_len bound method instead of the len() function
                    # which could potentially be wrapped in an lru_cache itself.
                    full = (cache_len() >= maxsize)
            return result

    def cache_info():
        """Report cache statistics"""
        with lock:
            return _CacheInfo(hits, misses, maxsize, cache_len())

    def cache_clear():
        """Clear the cache and cache statistics"""
        nonlocal hits, misses, full
        with lock:
            cache.clear()
            root[:] = [root, root, None, None]
            hits = misses = 0
            full = False

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return wrapper
645
646try:
647    from _functools import _lru_cache_wrapper
648except ImportError:
649    pass
650
651
652################################################################################
653### singledispatch() - single-dispatch generic function decorator
654################################################################################
655
656def _c3_merge(sequences):
657    """Merges MROs in *sequences* to a single MRO using the C3 algorithm.
658
659    Adapted from http://www.python.org/download/releases/2.3/mro/.
660
661    """
662    result = []
663    while True:
664        sequences = [s for s in sequences if s]   # purge empty sequences
665        if not sequences:
666            return result
667        for s1 in sequences:   # find merge candidates among seq heads
668            candidate = s1[0]
669            for s2 in sequences:
670                if candidate in s2[1:]:
671                    candidate = None
672                    break      # reject the current head, it appears later
673            else:
674                break
675        if candidate is None:
676            raise RuntimeError("Inconsistent hierarchy")
677        result.append(candidate)
678        # remove the chosen candidate
679        for seq in sequences:
680            if seq[0] == candidate:
681                del seq[0]
682
def _c3_mro(cls, abcs=None):
    """Computes the method resolution order using extended C3 linearization.

    If no *abcs* are given, the algorithm works exactly like the built-in C3
    linearization used for method resolution.

    If given, *abcs* is a list of abstract base classes that should be inserted
    into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
    result. The algorithm inserts ABCs where their functionality is introduced,
    i.e. issubclass(cls, abc) returns True for the class itself but returns
    False for all its direct base classes. Implicit ABCs for a given class
    (either registered or inferred from the presence of a special method like
    __len__) are inserted directly after the last ABC explicitly listed in the
    MRO of said class. If two implicit ABCs end up next to each other in the
    resulting MRO, their ordering depends on the order of types in *abcs*.

    """
    # Find the boundary index: bases up to (and including) the last base
    # that has __abstractmethods__ count as "explicit" ABC bases.
    for i, base in enumerate(reversed(cls.__bases__)):
        if hasattr(base, '__abstractmethods__'):
            boundary = len(cls.__bases__) - i
            break   # Bases up to the last explicit ABC are considered first.
    else:
        boundary = 0
    abcs = list(abcs) if abcs else []
    explicit_bases = list(cls.__bases__[:boundary])
    abstract_bases = []
    other_bases = list(cls.__bases__[boundary:])
    for base in abcs:
        if issubclass(cls, base) and not any(
                issubclass(b, base) for b in cls.__bases__
            ):
            # If *cls* is the class that introduces behaviour described by
            # an ABC *base*, insert said ABC to its MRO.
            abstract_bases.append(base)
    # Once inserted here, an ABC must not be re-inserted by recursive calls.
    for base in abstract_bases:
        abcs.remove(base)
    # Recursively linearize each group of bases, threading the remaining
    # (not yet placed) ABCs through every recursive call.
    explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
    abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
    other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
    return _c3_merge(
        [[cls]] +
        explicit_c3_mros + abstract_c3_mros + other_c3_mros +
        [explicit_bases] + [abstract_bases] + [other_bases]
    )
727
def _compose_mro(cls, types):
    """Calculates the method resolution order for a given class *cls*.

    Includes relevant abstract base classes (with their respective bases) from
    the *types* iterable. Uses a modified C3 linearization algorithm.

    """
    bases = set(cls.__mro__)
    # Remove entries which are already present in the __mro__ or unrelated.
    def is_related(typ):
        return (typ not in bases and hasattr(typ, '__mro__')
                                 and issubclass(cls, typ))
    types = [n for n in types if is_related(n)]
    # Remove entries which are strict bases of other entries (they will end
    # up in the MRO anyway).
    def is_strict_base(typ):
        for other in types:
            if typ != other and typ in other.__mro__:
                return True
        return False
    types = [n for n in types if not is_strict_base(n)]
    # Subclasses of the ABCs in *types* which are also implemented by
    # *cls* can be used to stabilize ABC ordering.
    type_set = set(types)
    mro = []
    for typ in types:
        found = []
        for sub in typ.__subclasses__():
            if sub not in bases and issubclass(cls, sub):
                found.append([s for s in sub.__mro__ if s in type_set])
        if not found:
            mro.append(typ)
            continue
        # Favor subclasses with the biggest number of useful bases
        found.sort(key=len, reverse=True)
        for sub in found:
            for subcls in sub:
                if subcls not in mro:
                    mro.append(subcls)
    return _c3_mro(cls, abcs=mro)
768
def _find_impl(cls, registry):
    """Returns the best matching implementation from *registry* for type *cls*.

    Where there is no registered implementation for a specific type, its method
    resolution order is used to find a more generic implementation.

    Note: if *registry* does not contain an implementation for the base
    *object* type, this function may return None.

    """
    # Walk the extended MRO (which weaves registered ABCs into cls.__mro__)
    # and take the first registered type encountered.
    mro = _compose_mro(cls, registry.keys())
    match = None
    for t in mro:
        if match is not None:
            # If *match* is an implicit ABC but there is another unrelated,
            # equally matching implicit ABC, refuse the temptation to guess.
            if (t in registry and t not in cls.__mro__
                              and match not in cls.__mro__
                              and not issubclass(match, t)):
                raise RuntimeError("Ambiguous dispatch: {} or {}".format(
                    match, t))
            break
        if t in registry:
            match = t
    # registry.get(match) rather than registry[match]: match may be None.
    return registry.get(match)
794
def singledispatch(func):
    """Single-dispatch generic function decorator.

    Transforms a function into a generic function, which can have different
    behaviours depending upon the type of its first argument. The decorated
    function acts as the default implementation, and additional
    implementations can be registered using the register() attribute of the
    generic function.
    """
    # There are many programs that use functools without singledispatch, so we
    # trade-off making singledispatch marginally slower for the benefit of
    # making start-up of such applications slightly faster.
    import types, weakref

    # Maps a class to the implementation registered for it.
    registry = {}
    # Memoizes dispatch() results; weak keys so classes can be collected.
    dispatch_cache = weakref.WeakKeyDictionary()
    # Set to the ABC cache token once an ABC is registered, so the dispatch
    # cache can be invalidated when ABC registrations change elsewhere.
    cache_token = None

    def dispatch(cls):
        """generic_func.dispatch(cls) -> <function implementation>

        Runs the dispatch algorithm to return the best available implementation
        for the given *cls* registered on *generic_func*.

        """
        nonlocal cache_token
        if cache_token is not None:
            # At least one ABC is registered: drop memoized results whenever
            # the global ABC cache token has moved on since we last looked.
            current_token = get_cache_token()
            if cache_token != current_token:
                dispatch_cache.clear()
                cache_token = current_token
        try:
            impl = dispatch_cache[cls]
        except KeyError:
            try:
                # Exact registration wins; otherwise resolve via the MRO.
                impl = registry[cls]
            except KeyError:
                impl = _find_impl(cls, registry)
            dispatch_cache[cls] = impl
        return impl

    def register(cls, func=None):
        """generic_func.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_func*.

        """
        nonlocal cache_token
        if func is None:
            if isinstance(cls, type):
                # @generic_func.register(some_class) usage: return a
                # decorator that records the eventual function.
                return lambda f: register(cls, f)
            # Plain @generic_func.register usage: *cls* is actually the
            # function, and the target class comes from its annotations.
            ann = getattr(cls, '__annotations__', {})
            if not ann:
                raise TypeError(
                    f"Invalid first argument to `register()`: {cls!r}. "
                    f"Use either `@register(some_class)` or plain `@register` "
                    f"on an annotated function."
                )
            func = cls

            # only import typing if annotation parsing is necessary
            from typing import get_type_hints
            argname, cls = next(iter(get_type_hints(func).items()))
            if not isinstance(cls, type):
                raise TypeError(
                    f"Invalid annotation for {argname!r}. "
                    f"{cls!r} is not a class."
                )
        registry[cls] = func
        if cache_token is None and hasattr(cls, '__abstractmethods__'):
            # First ABC registered: start tracking the ABC cache token.
            cache_token = get_cache_token()
        # Any new registration can change the best match for cached classes.
        dispatch_cache.clear()
        return func

    def wrapper(*args, **kw):
        if not args:
            raise TypeError(f'{funcname} requires at least '
                            '1 positional argument')

        # Dispatch on the class of the first positional argument.
        return dispatch(args[0].__class__)(*args, **kw)

    funcname = getattr(func, '__name__', 'singledispatch function')
    registry[object] = func
    wrapper.register = register
    wrapper.dispatch = dispatch
    # Read-only view so callers can inspect but not mutate registrations.
    wrapper.registry = types.MappingProxyType(registry)
    wrapper._clear_cache = dispatch_cache.clear
    update_wrapper(wrapper, func)
    return wrapper
884
885
# Descriptor version
class singledispatchmethod:
    """Single-dispatch generic method descriptor.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func):
        # Accept anything that is either callable or implements the
        # descriptor protocol (e.g. classmethod/staticmethod objects).
        if not (callable(func) or hasattr(func, "__get__")):
            raise TypeError(f"{func!r} is not callable or a descriptor")

        self.dispatcher = singledispatch(func)
        self.func = func

    def register(self, cls, method=None):
        """generic_method.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_method*.
        """
        return self.dispatcher.register(cls, func=method)

    def __get__(self, obj, cls=None):
        dispatch = self.dispatcher.dispatch

        def _method(*args, **kwargs):
            # Pick the implementation based on the first positional
            # argument's class, then bind it like a regular method.
            impl = dispatch(args[0].__class__)
            return impl.__get__(obj, cls)(*args, **kwargs)

        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method.register = self.register
        update_wrapper(_method, self.func)
        return _method

    @property
    def __isabstractmethod__(self):
        # Propagate abstractness of the wrapped callable, if any.
        return getattr(self.func, '__isabstractmethod__', False)
921
922
923################################################################################
924### cached_property() - computed once per instance, cached as attribute
925################################################################################
926
927_NOT_FOUND = object()
928
929
930class cached_property:
931    def __init__(self, func):
932        self.func = func
933        self.attrname = None
934        self.__doc__ = func.__doc__
935        self.lock = RLock()
936
937    def __set_name__(self, owner, name):
938        if self.attrname is None:
939            self.attrname = name
940        elif name != self.attrname:
941            raise TypeError(
942                "Cannot assign the same cached_property to two different names "
943                f"({self.attrname!r} and {name!r})."
944            )
945
946    def __get__(self, instance, owner=None):
947        if instance is None:
948            return self
949        if self.attrname is None:
950            raise TypeError(
951                "Cannot use cached_property instance without calling __set_name__ on it.")
952        try:
953            cache = instance.__dict__
954        except AttributeError:  # not all objects have __dict__ (e.g. class defines slots)
955            msg = (
956                f"No '__dict__' attribute on {type(instance).__name__!r} "
957                f"instance to cache {self.attrname!r} property."
958            )
959            raise TypeError(msg) from None
960        val = cache.get(self.attrname, _NOT_FOUND)
961        if val is _NOT_FOUND:
962            with self.lock:
963                # check if another thread filled cache while we awaited lock
964                val = cache.get(self.attrname, _NOT_FOUND)
965                if val is _NOT_FOUND:
966                    val = self.func(instance)
967                    try:
968                        cache[self.attrname] = val
969                    except TypeError:
970                        msg = (
971                            f"The '__dict__' attribute on {type(instance).__name__!r} instance "
972                            f"does not support item assignment for caching {self.attrname!r} property."
973                        )
974                        raise TypeError(msg) from None
975        return val
976