"""functools.py - Tools for working with functions and callable objects
2"""
3# Python module wrapper for _functools C module
4# to allow utilities written in Python to be added
5# to the functools module.
6# Written by Nick Coghlan <ncoghlan at gmail.com>,
7# Raymond Hettinger <python at rcn.com>,
8# and Ɓukasz Langa <lukasz at langa.pl>.
9# Copyright (C) 2006-2013 Python Software Foundation.
10# See C source code for _functools credits/copyright
11
12__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
13 'total_ordering', 'cache', 'cmp_to_key', 'lru_cache', 'reduce',
14 'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod',
15 'cached_property']
16
17from abc import get_cache_token
18from collections import namedtuple
19# import types, weakref # Deferred to single_dispatch()
20from reprlib import recursive_repr
21from _thread import RLock
22from types import GenericAlias
23
24
25################################################################################
26### update_wrapper() and wraps() decorator
27################################################################################
28
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection

WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
                       '__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Make *wrapper* look like the *wrapped* function.

    wrapper is the function to be updated
    wrapped is the original function
    assigned is a tuple naming the attributes assigned directly
    from the wrapped function to the wrapper function (defaults to
    functools.WRAPPER_ASSIGNMENTS)
    updated is a tuple naming the attributes of the wrapper that
    are updated with the corresponding attribute from the wrapped
    function (defaults to functools.WRAPPER_UPDATES)
    """
    missing = object()
    for attr in assigned:
        # Missing attributes on the wrapped function are simply skipped.
        value = getattr(wrapped, attr, missing)
        if value is not missing:
            setattr(wrapper, attr, value)
    for attr in updated:
        current = getattr(wrapper, attr)
        current.update(getattr(wrapped, attr, {}))
    # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
    # from the wrapped function when updating __dict__
    wrapper.__wrapped__ = wrapped
    # Return the wrapper so this can be used as a decorator via partial()
    return wrapper
64
def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Decorator factory to apply update_wrapper() to a wrapper function

    Returns a decorator that invokes update_wrapper() with the decorated
    function as the wrapper argument and the arguments to wraps() as the
    remaining arguments. Default arguments are as for update_wrapper().
    This is a convenience function to simplify applying partial() to
    update_wrapper().
    """
    # Freeze everything except the wrapper itself into a partial object.
    settings = {'wrapped': wrapped, 'assigned': assigned, 'updated': updated}
    return partial(update_wrapper, **settings)
78
79
80################################################################################
81### total_ordering class decorator
82################################################################################
83
84# The total ordering functions all invoke the root magic method directly
85# rather than using the corresponding operator. This avoids possible
86# infinite recursion that could occur when the operator dispatch logic
87# detects a NotImplemented result and then calls a reflected method.
88
def _gt_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__lt__(other)
    if result is not NotImplemented:
        return not result and self != other
    return NotImplemented
95
def _le_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__lt__(other)
    if result is not NotImplemented:
        return result or self == other
    return NotImplemented
102
def _ge_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a >= b. Computed by @total_ordering from (not a < b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__lt__(other)
    if result is not NotImplemented:
        return not result
    return NotImplemented
109
def _ge_from_le(self, other, NotImplemented=NotImplemented):
    'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__le__(other)
    if result is not NotImplemented:
        return not result or self == other
    return NotImplemented
116
def _lt_from_le(self, other, NotImplemented=NotImplemented):
    'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__le__(other)
    if result is not NotImplemented:
        return result and self != other
    return NotImplemented
123
def _gt_from_le(self, other, NotImplemented=NotImplemented):
    'Return a > b. Computed by @total_ordering from (not a <= b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__le__(other)
    if result is not NotImplemented:
        return not result
    return NotImplemented
130
def _lt_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__gt__(other)
    if result is not NotImplemented:
        return not result and self != other
    return NotImplemented
137
def _ge_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__gt__(other)
    if result is not NotImplemented:
        return result or self == other
    return NotImplemented
144
def _le_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a <= b. Computed by @total_ordering from (not a > b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__gt__(other)
    if result is not NotImplemented:
        return not result
    return NotImplemented
151
def _le_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__ge__(other)
    if result is not NotImplemented:
        return not result or self == other
    return NotImplemented
158
def _gt_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__ge__(other)
    if result is not NotImplemented:
        return result and self != other
    return NotImplemented
165
def _lt_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a < b. Computed by @total_ordering from (not a >= b).'
    # Call the root dunder directly to avoid reflected-operator recursion.
    result = self.__ge__(other)
    if result is not NotImplemented:
        return not result
    return NotImplemented
172
# Mapping from each root comparison dunder to the derived methods that
# total_ordering() can synthesize from it, listed in preference order.
_convert = {
    '__lt__': [('__gt__', _gt_from_lt),
               ('__le__', _le_from_lt),
               ('__ge__', _ge_from_lt)],
    '__le__': [('__ge__', _ge_from_le),
               ('__lt__', _lt_from_le),
               ('__gt__', _gt_from_le)],
    '__gt__': [('__lt__', _lt_from_gt),
               ('__ge__', _ge_from_gt),
               ('__le__', _le_from_gt)],
    '__ge__': [('__le__', _le_from_ge),
               ('__gt__', _gt_from_ge),
               ('__lt__', _lt_from_ge)]
}
187
def total_ordering(cls):
    """Class decorator that fills in missing ordering methods"""
    # Operations the class defines itself; inherited object slots excluded.
    defined = {
        op for op in _convert
        if getattr(cls, op, None) is not getattr(object, op, None)
    }
    if not defined:
        raise ValueError('must define at least one ordering operation: < > <= >=')
    root = max(defined)   # prefer __lt__ to __le__ to __gt__ to __ge__
    for opname, opfunc in _convert[root]:
        if opname in defined:
            continue
        opfunc.__name__ = opname
        setattr(cls, opname, opfunc)
    return cls
200
201
202################################################################################
203### cmp_to_key() function converter
204################################################################################
205
def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function"""
    class K(object):
        __slots__ = ['obj']
        __hash__ = None
        def __init__(self, obj):
            self.obj = obj
        def _cmp(self, other):
            # Single funnel for every rich comparison below.
            return mycmp(self.obj, other.obj)
        def __lt__(self, other):
            return self._cmp(other) < 0
        def __gt__(self, other):
            return self._cmp(other) > 0
        def __eq__(self, other):
            return self._cmp(other) == 0
        def __le__(self, other):
            return self._cmp(other) <= 0
        def __ge__(self, other):
            return self._cmp(other) >= 0
    return K
224
225try:
226 from _functools import cmp_to_key
227except ImportError:
228 pass
229
230
231################################################################################
232### reduce() sequence to a single item
233################################################################################
234
_initial_missing = object()   # sentinel: lets callers pass None as *initial*

def reduce(function, sequence, initial=_initial_missing):
    """
    reduce(function, sequence[, initial]) -> value

    Apply a function of two arguments cumulatively to the items of a sequence,
    from left to right, so as to reduce the sequence to a single value.
    For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
    ((((1+2)+3)+4)+5). If initial is present, it is placed before the items
    of the sequence in the calculation, and serves as a default when the
    sequence is empty.
    """
    iterator = iter(sequence)

    if initial is not _initial_missing:
        accumulator = initial
    else:
        # No seed given: the first item of the sequence becomes the seed.
        try:
            accumulator = next(iterator)
        except StopIteration:
            raise TypeError("reduce() of empty sequence with no initial value") from None

    for item in iterator:
        accumulator = function(accumulator, item)

    return accumulator
263
264try:
265 from _functools import reduce
266except ImportError:
267 pass
268
269
270################################################################################
271### partial() argument application
272################################################################################
273
# Purely functional, no descriptor behaviour
class partial:
    """New function with partial application of the given arguments
    and keywords.
    """

    __slots__ = "func", "args", "keywords", "__dict__", "__weakref__"

    def __new__(cls, func, /, *args, **keywords):
        if not callable(func):
            raise TypeError("the first argument must be callable")

        # Flatten one level of nesting so the stored callable is never
        # itself a partial-like object (anything exposing .func/.args).
        if hasattr(func, "func"):
            args = func.args + args
            keywords = {**func.keywords, **keywords}
            func = func.func

        obj = super(partial, cls).__new__(cls)
        obj.func = func
        obj.args = args
        obj.keywords = keywords
        return obj

    def __call__(self, /, *args, **keywords):
        # Call-time keywords override the frozen ones.
        merged = {**self.keywords, **keywords}
        return self.func(*self.args, *args, **merged)

    @recursive_repr()
    def __repr__(self):
        parts = [repr(self.func)]
        parts += [repr(a) for a in self.args]
        parts += [f"{name}={value!r}" for name, value in self.keywords.items()]
        body = f"{type(self).__qualname__}({', '.join(parts)})"
        if type(self).__module__ == "functools":
            return "functools." + body
        return body

    def __reduce__(self):
        # Empty keywords/__dict__ pickle as None to keep the payload small.
        state = (self.func, self.args, self.keywords or None,
                 self.__dict__ or None)
        return type(self), (self.func,), state

    def __setstate__(self, state):
        if not isinstance(state, tuple):
            raise TypeError("argument to __setstate__ must be a tuple")
        if len(state) != 4:
            raise TypeError(f"expected 4 items in state, got {len(state)}")
        fn, pos, kw, ns = state
        if (not callable(fn) or not isinstance(pos, tuple) or
                (kw is not None and not isinstance(kw, dict)) or
                (ns is not None and not isinstance(ns, dict))):
            raise TypeError("invalid partial state")

        pos = tuple(pos)  # just in case it's a subclass
        if kw is None:
            kw = {}
        elif type(kw) is not dict:  # XXX does it need to be *exactly* dict?
            kw = dict(kw)
        if ns is None:
            ns = {}

        self.__dict__ = ns
        self.func = fn
        self.args = pos
        self.keywords = kw
339
340try:
341 from _functools import partial
342except ImportError:
343 pass
344
# Descriptor version
class partialmethod(object):
    """Method descriptor with partial application of the given arguments
    and keywords.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func, /, *args, **keywords):
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError("{!r} is not callable or a descriptor"
                            .format(func))

        # func could be a descriptor like classmethod which isn't callable,
        # so we can't inherit from partial (it verifies func is callable)
        if isinstance(func, partialmethod):
            # Flatten nested partialmethods so cls/self lands before all
            # other arguments and only one function is ever invoked.
            self.func = func.func
            self.args = func.args + args
            self.keywords = {**func.keywords, **keywords}
        else:
            self.func = func
            self.args = args
            self.keywords = keywords

    def __repr__(self):
        pos = ", ".join(repr(a) for a in self.args)
        kw = ", ".join(f"{k}={v!r}" for k, v in self.keywords.items())
        klass = self.__class__
        return (f"{klass.__module__}.{klass.__qualname__}"
                f"({self.func}, {pos}, {kw})")

    def _make_unbound_method(self):
        # Plain-function fallback used when self.func is not a descriptor
        # that rebinds itself; cls_or_self is supplied at call time.
        def _method(cls_or_self, /, *args, **keywords):
            merged = {**self.keywords, **keywords}
            return self.func(cls_or_self, *self.args, *args, **merged)
        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method._partialmethod = self
        return _method

    def __get__(self, obj, cls=None):
        get = getattr(self.func, "__get__", None)
        if get is not None:
            new_func = get(obj, cls)
            if new_func is not self.func:
                # __get__ returned something new: assume that is the
                # appropriately bound callable and wrap it directly.
                bound = partial(new_func, *self.args, **self.keywords)
                try:
                    bound.__self__ = new_func.__self__
                except AttributeError:
                    pass
                return bound
        # The underlying descriptor didn't do anything; behave like an
        # ordinary instance method.
        return self._make_unbound_method().__get__(obj, cls)

    @property
    def __isabstractmethod__(self):
        return getattr(self.func, "__isabstractmethod__", False)

    __class_getitem__ = classmethod(GenericAlias)
416
417
418# Helper functions
419
def _unwrap_partial(func):
    """Return the innermost callable under a chain of partial wrappers."""
    current = func
    while isinstance(current, partial):
        current = current.func
    return current
424
425################################################################################
426### LRU Cache function decorator
427################################################################################
428
# Statistics tuple returned by the cache_info() method of lru_cache wrappers.
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
430
class _HashedSeq(list):
    """A list whose hash is computed exactly once.

    lru_cache() may hash a key several times on a cache miss; caching the
    hash of the underlying tuple keeps that cost to a single call per
    element.
    """

    __slots__ = 'hashvalue'

    def __init__(self, tup, hash=hash):
        # Populate the list contents, then compute the hash a single time.
        self.extend(tup)
        self.hashvalue = hash(tup)

    def __hash__(self):
        return self.hashvalue
446
def _make_key(args, kwds, typed,
             kwd_mark = (object(),),
             fasttypes = {int, str},
             tuple=tuple, type=type, len=len):
    """Make a cache key from optionally typed positional and keyword arguments

    The key is constructed in a way that is flat as possible rather than
    as a nested structure that would take more memory.

    If there is only a single argument and its data type is known to cache
    its hash value, then that argument is returned without a wrapper. This
    saves space and improves lookup speed.

    """
    # All of code below relies on kwds preserving the order input by the user.
    # Formerly, we sorted() the kwds before looping.  The new way is *much*
    # faster; however, it means that f(x=1, y=2) will now be treated as a
    # distinct call from f(y=2, x=1) which will be cached separately.
    key = args
    if kwds:
        # The unique kwd_mark object separates positionals from keywords.
        key += kwd_mark
        for pair in kwds.items():
            key += pair
    if typed:
        key += tuple(type(arg) for arg in args)
        if kwds:
            key += tuple(type(val) for val in kwds.values())
    elif len(key) == 1 and type(key[0]) in fasttypes:
        # Single int/str argument: these cache their own hash, so skip
        # the _HashedSeq wrapper entirely.
        return key[0]
    return _HashedSeq(key)
477
def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See: http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)

    """
    # Users should only access the lru_cache through its public API:
    # cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    def _wrap(user_function, size):
        # Build the wrapper, expose its construction parameters, and copy
        # metadata from the user function.
        wrapper = _lru_cache_wrapper(user_function, size, typed, _CacheInfo)
        wrapper.cache_parameters = lambda: {'maxsize': size, 'typed': typed}
        return update_wrapper(wrapper, user_function)

    if callable(maxsize) and isinstance(typed, bool):
        # Bare @lru_cache usage: the function arrived via *maxsize*.
        return _wrap(maxsize, 128)

    if isinstance(maxsize, int):
        size = max(maxsize, 0)   # negative maxsize is treated as 0
    elif maxsize is None:
        size = None
    else:
        raise TypeError(
            'Expected first argument to be an integer, a callable, or None')

    def decorating_function(user_function):
        return _wrap(user_function, size)

    return decorating_function
523
def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
    """Build the caching wrapper around *user_function* (pure-Python version).

    One of three wrappers is returned depending on *maxsize*:
      * 0      -- no caching at all, only a miss counter;
      * None   -- unbounded dict-backed cache;
      * n > 0  -- size-limited LRU backed by a circular doubly linked list.
    """
    # Constants shared by all lru cache instances:
    sentinel = object()          # unique object used to signal cache misses
    make_key = _make_key         # build a key from the function arguments
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    cache = {}
    hits = misses = 0
    full = False
    cache_get = cache.get    # bound method to lookup a key or return None
    cache_len = cache.__len__  # get cache size without calling len()
    lock = RLock()           # because linkedlist updates aren't threadsafe
    root = []                # root of the circular doubly linked list
    root[:] = [root, root, None, None]     # initialize by pointing to self

    if maxsize == 0:

        def wrapper(*args, **kwds):
            # No caching -- just a statistics update
            nonlocal misses
            misses += 1
            result = user_function(*args, **kwds)
            return result

    elif maxsize is None:

        def wrapper(*args, **kwds):
            # Simple caching without ordering or size limit
            nonlocal hits, misses
            key = make_key(args, kwds, typed)
            result = cache_get(key, sentinel)
            if result is not sentinel:
                hits += 1
                return result
            misses += 1
            result = user_function(*args, **kwds)
            cache[key] = result
            return result

    else:

        def wrapper(*args, **kwds):
            # Size limited caching that tracks accesses by recency
            nonlocal root, hits, misses, full
            key = make_key(args, kwds, typed)
            with lock:
                link = cache_get(key)
                if link is not None:
                    # Move the link to the front of the circular queue
                    link_prev, link_next, _key, result = link
                    link_prev[NEXT] = link_next
                    link_next[PREV] = link_prev
                    last = root[PREV]
                    last[NEXT] = root[PREV] = link
                    link[PREV] = last
                    link[NEXT] = root
                    hits += 1
                    return result
                misses += 1
            # NOTE: the lock is deliberately released while the user function
            # runs, so a concurrent call with the same key may fill the cache
            # first; that race is handled below.
            result = user_function(*args, **kwds)
            with lock:
                if key in cache:
                    # Getting here means that this same key was added to the
                    # cache while the lock was released.  Since the link
                    # update is already done, we need only return the
                    # computed result and update the count of misses.
                    pass
                elif full:
                    # Use the old root to store the new key and result.
                    oldroot = root
                    oldroot[KEY] = key
                    oldroot[RESULT] = result
                    # Empty the oldest link and make it the new root.
                    # Keep a reference to the old key and old result to
                    # prevent their ref counts from going to zero during the
                    # update. That will prevent potentially arbitrary object
                    # clean-up code (i.e. __del__) from running while we're
                    # still adjusting the links.
                    root = oldroot[NEXT]
                    oldkey = root[KEY]
                    oldresult = root[RESULT]
                    root[KEY] = root[RESULT] = None
                    # Now update the cache dictionary.
                    del cache[oldkey]
                    # Save the potentially reentrant cache[key] assignment
                    # for last, after the root and links have been put in
                    # a consistent state.
                    cache[key] = oldroot
                else:
                    # Put result in a new link at the front of the queue.
                    last = root[PREV]
                    link = [last, root, key, result]
                    last[NEXT] = root[PREV] = cache[key] = link
                # Use the cache_len bound method instead of the len() function
                # which could potentially be wrapped in an lru_cache itself.
                full = (cache_len() >= maxsize)
            return result

    def cache_info():
        """Report cache statistics"""
        with lock:
            return _CacheInfo(hits, misses, maxsize, cache_len())

    def cache_clear():
        """Clear the cache and cache statistics"""
        nonlocal hits, misses, full
        with lock:
            cache.clear()
            root[:] = [root, root, None, None]
            hits = misses = 0
            full = False

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return wrapper
639
640try:
641 from _functools import _lru_cache_wrapper
642except ImportError:
643 pass
644
645
646################################################################################
647### cache -- simplified access to the infinity cache
648################################################################################
649
def cache(user_function, /):
    'Simple lightweight unbounded cache. Sometimes called "memoize".'
    # Equivalent to @lru_cache(maxsize=None), just more discoverable.
    unbounded = lru_cache(maxsize=None)
    return unbounded(user_function)
653
654
655################################################################################
656### singledispatch() - single-dispatch generic function decorator
657################################################################################
658
def _c3_merge(sequences):
    """Merges MROs in *sequences* to a single MRO using the C3 algorithm.

    Adapted from http://www.python.org/download/releases/2.3/mro/.

    """
    result = []
    while True:
        sequences = [seq for seq in sequences if seq]   # drop exhausted sequences
        if not sequences:
            return result
        # Look for a head that does not appear in the tail of any sequence;
        # such a head is safe to emit next.
        for seq in sequences:
            head = seq[0]
            if any(head in other[1:] for other in sequences):
                head = None   # appears later somewhere: reject this head
            else:
                break
        if head is None:
            raise RuntimeError("Inconsistent hierarchy")
        result.append(head)
        # Consume the chosen head from every sequence that starts with it.
        for seq in sequences:
            if seq[0] == head:
                del seq[0]
685
def _c3_mro(cls, abcs=None):
    """Computes the method resolution order using extended C3 linearization.

    If no *abcs* are given, the algorithm works exactly like the built-in C3
    linearization used for method resolution.

    If given, *abcs* is a list of abstract base classes that should be inserted
    into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
    result. The algorithm inserts ABCs where their functionality is introduced,
    i.e. issubclass(cls, abc) returns True for the class itself but returns
    False for all its direct base classes. Implicit ABCs for a given class
    (either registered or inferred from the presence of a special method like
    __len__) are inserted directly after the last ABC explicitly listed in the
    MRO of said class. If two implicit ABCs end up next to each other in the
    resulting MRO, their ordering depends on the order of types in *abcs*.

    """
    # Bases up to (and including) the last explicit ABC are linearized
    # before any implicit ABC taken from *abcs*.
    for i, base in enumerate(reversed(cls.__bases__)):
        if hasattr(base, '__abstractmethods__'):
            boundary = len(cls.__bases__) - i
            break
    else:
        boundary = 0
    abcs = list(abcs) if abcs else []
    explicit_bases = list(cls.__bases__[:boundary])
    other_bases = list(cls.__bases__[boundary:])
    # An implicit ABC is inserted here only when *cls* itself introduces it,
    # i.e. no direct base already provides it.
    abstract_bases = [
        base for base in abcs
        if issubclass(cls, base)
        and not any(issubclass(b, base) for b in cls.__bases__)
    ]
    for base in abstract_bases:
        abcs.remove(base)
    linearize = lambda bases: [_c3_mro(base, abcs=abcs) for base in bases]
    return _c3_merge(
        [[cls]]
        + linearize(explicit_bases)
        + linearize(abstract_bases)
        + linearize(other_bases)
        + [explicit_bases] + [abstract_bases] + [other_bases]
    )
730
def _compose_mro(cls, types):
    """Calculates the method resolution order for a given class *cls*.

    Includes relevant abstract base classes (with their respective bases) from
    the *types* iterable. Uses a modified C3 linearization algorithm.

    """
    bases = set(cls.__mro__)

    def is_related(typ):
        # Keep only real supertypes of *cls* that aren't already in its MRO.
        return (typ not in bases and hasattr(typ, '__mro__')
                and issubclass(cls, typ))

    candidates = [typ for typ in types if is_related(typ)]

    def is_strict_base(typ):
        # Strict bases of other candidates will land in the MRO anyway.
        return any(typ != other and typ in other.__mro__
                   for other in candidates)

    candidates = [typ for typ in candidates if not is_strict_base(typ)]
    # Subclasses of the ABCs in *types* which are also implemented by
    # *cls* can be used to stabilize ABC ordering.
    candidate_set = set(candidates)
    mro = []
    for typ in candidates:
        found = []
        for sub in typ.__subclasses__():
            if sub not in bases and issubclass(cls, sub):
                found.append([s for s in sub.__mro__ if s in candidate_set])
        if not found:
            mro.append(typ)
            continue
        # Favor subclasses with the biggest number of useful bases
        found.sort(key=len, reverse=True)
        for seq in found:
            for subcls in seq:
                if subcls not in mro:
                    mro.append(subcls)
    return _c3_mro(cls, abcs=mro)
771
def _find_impl(cls, registry):
    """Returns the best matching implementation from *registry* for type *cls*.

    Where there is no registered implementation for a specific type, its method
    resolution order is used to find a more generic implementation.

    Note: if *registry* does not contain an implementation for the base
    *object* type, this function may return None.

    """
    mro = _compose_mro(cls, registry.keys())
    match = None
    for candidate in mro:
        if match is not None:
            # If *match* is an implicit ABC but there is another unrelated,
            # equally matching implicit ABC, refuse the temptation to guess.
            if (candidate in registry and candidate not in cls.__mro__
                                      and match not in cls.__mro__
                                      and not issubclass(match, candidate)):
                raise RuntimeError("Ambiguous dispatch: {} or {}".format(
                    match, candidate))
            break
        if candidate in registry:
            match = candidate
    return registry.get(match)
797
def singledispatch(func):
    """Single-dispatch generic function decorator.

    Transforms a function into a generic function, which can have different
    behaviours depending upon the type of its first argument. The decorated
    function acts as the default implementation, and additional
    implementations can be registered using the register() attribute of the
    generic function.
    """
    # There are many programs that use functools without singledispatch, so we
    # trade-off making singledispatch marginally slower for the benefit of
    # making start-up of such applications slightly faster.
    import types, weakref

    registry = {}
    # WeakKeyDictionary so cached dispatch decisions don't keep classes alive.
    dispatch_cache = weakref.WeakKeyDictionary()
    # Last seen ABC cache token; non-None once any ABC is registered, so that
    # later ABC register()/unregister() calls invalidate the dispatch cache.
    cache_token = None

    def dispatch(cls):
        """generic_func.dispatch(cls) -> <function implementation>

        Runs the dispatch algorithm to return the best available implementation
        for the given *cls* registered on *generic_func*.

        """
        nonlocal cache_token
        if cache_token is not None:
            # Drop cached decisions when the ABC registry has changed.
            current_token = get_cache_token()
            if cache_token != current_token:
                dispatch_cache.clear()
                cache_token = current_token
        try:
            impl = dispatch_cache[cls]
        except KeyError:
            # Exact match first, then fall back to an MRO-based search.
            try:
                impl = registry[cls]
            except KeyError:
                impl = _find_impl(cls, registry)
            dispatch_cache[cls] = impl
        return impl

    def register(cls, func=None):
        """generic_func.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_func*.

        """
        nonlocal cache_token
        if func is None:
            # Called as @register(some_class): return a decorator that binds
            # the class; or as plain @register on an annotated function.
            if isinstance(cls, type):
                return lambda f: register(cls, f)
            ann = getattr(cls, '__annotations__', {})
            if not ann:
                raise TypeError(
                    f"Invalid first argument to `register()`: {cls!r}. "
                    f"Use either `@register(some_class)` or plain `@register` "
                    f"on an annotated function."
                )
            func = cls

            # only import typing if annotation parsing is necessary
            from typing import get_type_hints
            argname, cls = next(iter(get_type_hints(func).items()))
            if not isinstance(cls, type):
                raise TypeError(
                    f"Invalid annotation for {argname!r}. "
                    f"{cls!r} is not a class."
                )
        registry[cls] = func
        if cache_token is None and hasattr(cls, '__abstractmethods__'):
            # First ABC registered: start tracking the ABC cache token.
            cache_token = get_cache_token()
        dispatch_cache.clear()
        return func

    def wrapper(*args, **kw):
        if not args:
            raise TypeError(f'{funcname} requires at least '
                            '1 positional argument')

        return dispatch(args[0].__class__)(*args, **kw)

    funcname = getattr(func, '__name__', 'singledispatch function')
    registry[object] = func
    wrapper.register = register
    wrapper.dispatch = dispatch
    # Read-only view; mutation goes through register().
    wrapper.registry = types.MappingProxyType(registry)
    wrapper._clear_cache = dispatch_cache.clear
    update_wrapper(wrapper, func)
    return wrapper
887
888
# Descriptor version
class singledispatchmethod:
    """Single-dispatch generic method descriptor.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func):
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError(f"{func!r} is not callable or a descriptor")

        self.dispatcher = singledispatch(func)
        self.func = func

    def register(self, cls, method=None):
        """generic_method.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_method*.
        """
        return self.dispatcher.register(cls, func=method)

    def __get__(self, obj, cls=None):
        def _method(*args, **kwargs):
            # Dispatch on the first positional argument's class, then bind
            # the chosen implementation like a normal method.
            impl = self.dispatcher.dispatch(args[0].__class__)
            return impl.__get__(obj, cls)(*args, **kwargs)

        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method.register = self.register
        update_wrapper(_method, self.func)
        return _method

    @property
    def __isabstractmethod__(self):
        return getattr(self.func, '__isabstractmethod__', False)
924
925
926################################################################################
927### cached_property() - computed once per instance, cached as attribute
928################################################################################
929
_NOT_FOUND = object()   # sentinel distinguishing "not cached yet" from None


class cached_property:
    """Descriptor that caches the wrapped method's result per instance.

    The value is computed at most once per instance and stored in the
    instance ``__dict__`` under the property's name; concurrent first
    accesses are serialized by a per-property lock.
    """

    def __init__(self, func):
        self.func = func
        self.attrname = None
        self.__doc__ = func.__doc__
        self.lock = RLock()

    def __set_name__(self, owner, name):
        if self.attrname is None:
            self.attrname = name
        elif name != self.attrname:
            raise TypeError(
                "Cannot assign the same cached_property to two different names "
                f"({self.attrname!r} and {name!r})."
            )

    def __get__(self, instance, owner=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        if self.attrname is None:
            raise TypeError(
                "Cannot use cached_property instance without calling __set_name__ on it.")
        try:
            instance_dict = instance.__dict__
        except AttributeError:  # not all objects have __dict__ (e.g. class defines slots)
            msg = (
                f"No '__dict__' attribute on {type(instance).__name__!r} "
                f"instance to cache {self.attrname!r} property."
            )
            raise TypeError(msg) from None
        value = instance_dict.get(self.attrname, _NOT_FOUND)
        if value is not _NOT_FOUND:
            return value
        with self.lock:
            # Another thread may have filled the cache while we awaited
            # the lock; re-check before computing.
            value = instance_dict.get(self.attrname, _NOT_FOUND)
            if value is not _NOT_FOUND:
                return value
            value = self.func(instance)
            try:
                instance_dict[self.attrname] = value
            except TypeError:
                msg = (
                    f"The '__dict__' attribute on {type(instance).__name__!r} instance "
                    f"does not support item assignment for caching {self.attrname!r} property."
                )
                raise TypeError(msg) from None
        return value

    __class_getitem__ = classmethod(GenericAlias)