# Copyright 2001-2022 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.

Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging' and log away!
"""

import sys, os, time, io, re, traceback, warnings, weakref, collections.abc

from types import GenericAlias
from string import Template
from string import Formatter as StrFormatter


__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
           'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
           'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
           'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
           'captureWarnings', 'critical', 'debug', 'disable', 'error',
           'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
           'info', 'log', 'makeLogRecord', 'setLoggerClass', 'shutdown',
           'warn', 'warning', 'getLogRecordFactory', 'setLogRecordFactory',
           'lastResort', 'raiseExceptions', 'getLevelNamesMapping',
           'getHandlerByName', 'getHandlerNames']

import threading

__author__  = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__  = "production"
# The following module attributes are no longer updated.
__version__ = "0.5.1.2"
__date__    = "07 February 2010"

#---------------------------------------------------------------------------
#   Miscellaneous module data
#---------------------------------------------------------------------------

#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time_ns()

#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True

#
# If you don't want threading information in the log, set this to False
#
logThreads = True

#
# If you don't want multiprocessing information in the log, set this to False
#
logMultiprocessing = True

#
# If you don't want process information in the log, set this to False
#
logProcesses = True

#
# If you don't want asyncio task information in the log, set this to False
#
logAsyncioTasks = True

#---------------------------------------------------------------------------
#   Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#

CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0

_levelToName = {
    CRITICAL: 'CRITICAL',
    ERROR: 'ERROR',
    WARNING: 'WARNING',
    INFO: 'INFO',
    DEBUG: 'DEBUG',
    NOTSET: 'NOTSET',
}
_nameToLevel = {
    'CRITICAL': CRITICAL,
    'FATAL': FATAL,
    'ERROR': ERROR,
    'WARN': WARNING,
    'WARNING': WARNING,
    'INFO': INFO,
    'DEBUG': DEBUG,
    'NOTSET': NOTSET,
}

def getLevelNamesMapping():
    return _nameToLevel.copy()

def getLevelName(level):
    """
    Return the textual or numeric representation of logging level 'level'.

    If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
    INFO, DEBUG) then you get the corresponding string. If you have
    associated levels with names using addLevelName then the name you have
    associated with 'level' is returned.

    If a numeric value corresponding to one of the defined levels is passed
    in, the corresponding string representation is returned.

    If a string representation of the level is passed in, the corresponding
    numeric value is returned.

    If no matching numeric or string value is passed in, the string
    'Level %s' % level is returned.
    """
    # See Issues #22386, #27937 and #29220 for why it's this way
    result = _levelToName.get(level)
    if result is not None:
        return result
    result = _nameToLevel.get(level)
    if result is not None:
        return result
    return "Level %s" % level

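# A quick illustration (not executed here) of how getLevelName resolves in
# both directions, plus its fallback for unknown values; the 55 is an
# arbitrary example number:
#
#     >>> getLevelName(INFO)
#     'INFO'
#     >>> getLevelName('INFO')
#     20
#     >>> getLevelName(55)
#     'Level 55'
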
def addLevelName(level, levelName):
    """
    Associate 'levelName' with 'level'.

    This is used when converting levels to text during message formatting.
    """
    with _lock:
        _levelToName[level] = levelName
        _nameToLevel[levelName] = level

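# Sketch of registering a custom level; the TRACE name and the value 5 are
# illustrative choices, not part of this module:
#
#     >>> TRACE = 5
#     >>> addLevelName(TRACE, 'TRACE')
#     >>> getLevelName(TRACE)
#     'TRACE'
#     >>> getLevelName('TRACE')
#     5
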
if hasattr(sys, "_getframe"):
    currentframe = lambda: sys._getframe(1)
else: #pragma: no cover
    def currentframe():
        """Return the frame object for the caller's stack frame."""
        try:
            raise Exception
        except Exception as exc:
            return exc.__traceback__.tb_frame.f_back

#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame, by skipping frames whose filename is that of this
# module's source. It therefore should contain the filename of this module's
# source file.
#
# Ordinarily we would use __file__ for this, but frozen modules don't always
# have __file__ set, for some reason (see Issue #21736). Thus, we get the
# filename from a handy code object from a function defined in this module.
# (There's no particular reason for picking addLevelName.)
#

_srcfile = os.path.normcase(addLevelName.__code__.co_filename)

# _srcfile is only used in conjunction with sys._getframe().
# Setting _srcfile to None will prevent findCaller() from being called. This
# way, you can avoid the overhead of fetching caller information.

# The following is based on warnings._is_internal_frame. It makes sure that
# frames of the import mechanism are skipped when logging at module level and
# using a stacklevel value greater than one.
def _is_internal_frame(frame):
    """Signal whether the frame is a CPython or logging module internal."""
    filename = os.path.normcase(frame.f_code.co_filename)
    return filename == _srcfile or (
        "importlib" in filename and "_bootstrap" in filename
    )


def _checkLevel(level):
    if isinstance(level, int):
        rv = level
    elif str(level) == level:
        if level not in _nameToLevel:
            raise ValueError("Unknown level: %r" % level)
        rv = _nameToLevel[level]
    else:
        raise TypeError("Level not an integer or a valid string: %r"
                        % (level,))
    return rv

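# Behaviour of the private helper above, sketched for reference rather than
# executed ('VERBOSE' is just an unregistered example name):
#
#     >>> _checkLevel('DEBUG')
#     10
#     >>> _checkLevel('VERBOSE')
#     Traceback (most recent call last):
#       ...
#     ValueError: Unknown level: 'VERBOSE'
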
#---------------------------------------------------------------------------
#   Thread-related stuff
#---------------------------------------------------------------------------

#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
_lock = threading.RLock()

def _prepareFork():
    """
    Prepare to fork a new child process by acquiring the module-level lock.

    This should be used in conjunction with _afterFork().
    """
    # Wrap the lock acquisition in a try-except to prevent the lock from being
    # abandoned in the event of an asynchronous exception. See gh-106238.
    try:
        _lock.acquire()
    except BaseException:
        _lock.release()
        raise

def _afterFork():
    """
    After a new child process has been forked, release the module-level lock.

    This should be used in conjunction with _prepareFork().
    """
    _lock.release()


# Prevent a held logging lock from blocking a child from logging.

if not hasattr(os, 'register_at_fork'):  # Windows and friends.
    def _register_at_fork_reinit_lock(instance):
        pass  # no-op when os.register_at_fork does not exist.
else:
    # A collection of instances with a _at_fork_reinit method (logging.Handler)
    # to be called in the child after forking.  The weakref avoids us keeping
    # discarded Handler instances alive.
    _at_fork_reinit_lock_weakset = weakref.WeakSet()

    def _register_at_fork_reinit_lock(instance):
        with _lock:
            _at_fork_reinit_lock_weakset.add(instance)

    def _after_at_fork_child_reinit_locks():
        for handler in _at_fork_reinit_lock_weakset:
            handler._at_fork_reinit()

        # _prepareFork() was called in the parent before forking.
        # The lock is reinitialized to unlocked state.
        _lock._at_fork_reinit()

    os.register_at_fork(before=_prepareFork,
                        after_in_child=_after_at_fork_child_reinit_locks,
                        after_in_parent=_afterFork)


#---------------------------------------------------------------------------
#   The logging record
#---------------------------------------------------------------------------

class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Initialize a logging record with interesting information.
        """
        ct = time.time_ns()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #  logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warning('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        # Issue #21172: a request was made to relax the isinstance check
        # to hasattr(args[0], '__getitem__'). However, the docs on string
        # formatting still seem to suggest a mapping object is required.
        # Thus, while not removing the isinstance check, it does now look
        # for collections.abc.Mapping rather than, as before, dict.
        if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping)
            and args[0]):
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None      # used to cache the traceback text
        self.stack_info = sinfo
        self.lineno = lineno
        self.funcName = func
        self.created = ct / 1e9  # ns to float seconds
        # Get the number of whole milliseconds (0-999) in the fractional part of seconds.
        # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns--> 999 ms
        # Convert to float by adding 0.0 for historical reasons. See gh-89047
        self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0
        if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000:
            # ns -> sec conversion can round up, e.g:
            # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
            self.msecs = 0.0

        self.relativeCreated = (ct - _startTime) / 1e6
        if logThreads:
            self.thread = threading.get_ident()
            self.threadName = threading.current_thread().name
        else: # pragma: no cover
            self.thread = None
            self.threadName = None
        if not logMultiprocessing: # pragma: no cover
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except Exception: #pragma: no cover
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None

        self.taskName = None
        if logAsyncioTasks:
            asyncio = sys.modules.get('asyncio')
            if asyncio:
                try:
                    self.taskName = asyncio.current_task().get_name()
                except Exception:
                    pass

    def __repr__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)

    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        msg = str(self.msg)
        if self.args:
            msg = msg % self.args
        return msg

#
#   Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord

def setLogRecordFactory(factory):
    """
    Set the factory to be used when instantiating a log record.

    :param factory: A callable which will be called to instantiate
    a log record.
    """
    global _logRecordFactory
    _logRecordFactory = factory

def getLogRecordFactory():
    """
    Return the factory to be used when instantiating a log record.
    """

    return _logRecordFactory

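# A hedged sketch of the usual factory pattern: wrap the current factory so
# that every record gains an extra attribute. The attribute name 'custom_attr'
# and its value are purely illustrative.
#
#     old_factory = getLogRecordFactory()
#
#     def record_factory(*args, **kwargs):
#         record = old_factory(*args, **kwargs)
#         record.custom_attr = 'value'
#         return record
#
#     setLogRecordFactory(record_factory)
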
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary.
    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv


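# Illustration only: rebuilding a record from a dictionary, e.g. one received
# from a socket peer (the attribute values shown are made up):
#
#     >>> rec = makeLogRecord({'name': 'demo', 'levelno': INFO,
#     ...                      'levelname': 'INFO', 'msg': 'payload received'})
#     >>> rec.getMessage()
#     'payload received'
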
#---------------------------------------------------------------------------
#   Formatter classes and functions
#---------------------------------------------------------------------------
_str_formatter = StrFormatter()
del StrFormatter


class PercentStyle(object):

    default_format = '%(message)s'
    asctime_format = '%(asctime)s'
    asctime_search = '%(asctime)'
    validation_pattern = re.compile(r'%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]', re.I)

    def __init__(self, fmt, *, defaults=None):
        self._fmt = fmt or self.default_format
        self._defaults = defaults

    def usesTime(self):
        return self._fmt.find(self.asctime_search) >= 0

    def validate(self):
        """Validate the input format; ensure it matches the correct style."""
        if not self.validation_pattern.search(self._fmt):
            raise ValueError("Invalid format '%s' for '%s' style" % (self._fmt, self.default_format[0]))

    def _format(self, record):
        if defaults := self._defaults:
            values = defaults | record.__dict__
        else:
            values = record.__dict__
        return self._fmt % values

    def format(self, record):
        try:
            return self._format(record)
        except KeyError as e:
            raise ValueError('Formatting field not found in record: %s' % e)


class StrFormatStyle(PercentStyle):
    default_format = '{message}'
    asctime_format = '{asctime}'
    asctime_search = '{asctime'

    fmt_spec = re.compile(r'^(.?[<>=^])?[+ -]?#?0?(\d+|{\w+})?[,_]?(\.(\d+|{\w+}))?[bcdefgnosx%]?$', re.I)
    field_spec = re.compile(r'^(\d+|\w+)(\.\w+|\[[^]]+\])*$')

    def _format(self, record):
        if defaults := self._defaults:
            values = defaults | record.__dict__
        else:
            values = record.__dict__
        return self._fmt.format(**values)

    def validate(self):
        """Validate the input format; ensure it is the correct string formatting style."""
        fields = set()
        try:
            for _, fieldname, spec, conversion in _str_formatter.parse(self._fmt):
                if fieldname:
                    if not self.field_spec.match(fieldname):
                        raise ValueError('invalid field name/expression: %r' % fieldname)
                    fields.add(fieldname)
                if conversion and conversion not in 'rsa':
                    raise ValueError('invalid conversion: %r' % conversion)
                if spec and not self.fmt_spec.match(spec):
                    raise ValueError('bad specifier: %r' % spec)
        except ValueError as e:
            raise ValueError('invalid format: %s' % e)
        if not fields:
            raise ValueError('invalid format: no fields')


class StringTemplateStyle(PercentStyle):
    default_format = '${message}'
    asctime_format = '${asctime}'
    asctime_search = '${asctime}'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._tpl = Template(self._fmt)

    def usesTime(self):
        fmt = self._fmt
        return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_search) >= 0

    def validate(self):
        pattern = Template.pattern
        fields = set()
        for m in pattern.finditer(self._fmt):
            d = m.groupdict()
            if d['named']:
                fields.add(d['named'])
            elif d['braced']:
                fields.add(d['braced'])
            elif m.group(0) == '$':
                raise ValueError('invalid format: bare \'$\' not allowed')
        if not fields:
            raise ValueError('invalid format: no fields')

    def _format(self, record):
        if defaults := self._defaults:
            values = defaults | record.__dict__
        else:
            values = record.__dict__
        return self._tpl.substitute(**values)


BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"

_STYLES = {
    '%': (PercentStyle, BASIC_FORMAT),
    '{': (StrFormatStyle, '{levelname}:{name}:{message}'),
    '$': (StringTemplateStyle, '${levelname}:${name}:${message}'),
}

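# For orientation only: the three style markers select equivalent default
# formats, so a Formatter (defined below) built with any of them renders the
# same fields. Sketch, assuming a LogRecord 'record' is at hand:
#
#     fmt_percent = Formatter(BASIC_FORMAT, style='%')
#     fmt_brace   = Formatter('{levelname}:{name}:{message}', style='{')
#     fmt_dollar  = Formatter('${levelname}:${name}:${message}', style='$')
#     # all three would render e.g. 'INFO:demo:payload received' for one record
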
class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    style-dependent default value, "%(message)s", "{message}", or
    "${message}", is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:

    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time_ns() / 1e9
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(taskName)s        Task name (if available)
    %(process)d         Process ID (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """

    converter = time.localtime

    def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *,
                 defaults=None):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument. If datefmt is omitted, you get an
        ISO8601-like (or RFC 3339-like) format.

        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.

        .. versionchanged:: 3.2
           Added the ``style`` parameter.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                             _STYLES.keys()))
        self._style = _STYLES[style][0](fmt, defaults=defaults)
        if validate:
            self._style.validate()

        self._fmt = self._style._fmt
        self.datefmt = datefmt

    default_time_format = '%Y-%m-%d %H:%M:%S'
    default_msec_format = '%s,%03d'

    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
        The resulting string is returned. This function uses a user-configurable
        function to convert the creation time to a tuple. By default,
        time.localtime() is used; to change this for a particular formatter
        instance, set the 'converter' attribute to a function with the same
        signature as time.localtime() or time.gmtime(). To change it for all
        formatters, for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            s = time.strftime(self.default_time_format, ct)
            if self.default_msec_format:
                s = self.default_msec_format % (s, record.msecs)
        return s

    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses
        traceback.print_exception()
        """
        sio = io.StringIO()
        tb = ei[2]
        # See issues #9427, #1553375. Commented out for now.
        #if getattr(self, 'fullstack', False):
        #    traceback.print_stack(tb.tb_frame.f_back, file=sio)
        traceback.print_exception(ei[0], ei[1], tb, limit=None, file=sio)
        s = sio.getvalue()
        sio.close()
        if s[-1:] == "\n":
            s = s[:-1]
        return s

    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._style.usesTime()

    def formatMessage(self, record):
        return self._style.format(record)

    def formatStack(self, stack_info):
        """
        This method is provided as an extension point for specialized
        formatting of stack information.

        The input data is a string as returned from a call to
        :func:`traceback.print_stack`, but with the last trailing newline
        removed.

        The base implementation just returns the value passed in.
        """
        return stack_info

    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + record.exc_text
        if record.stack_info:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + self.formatStack(record.stack_info)
        return s

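# Typical construction, shown as a hedged sketch (the format and date strings
# are just examples):
#
#     fmt = Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s',
#                     datefmt='%Y-%m-%dT%H:%M:%S')
#     # For UTC timestamps, the docstring above suggests swapping the converter:
#     Formatter.converter = time.gmtime
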
#
#   The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()

class BufferingFormatter(object):
    """
    A formatter suitable for formatting a number of records.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record.
        """
        if linefmt:
            self.linefmt = linefmt
        else:
            self.linefmt = _defaultFormatter

    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        """
        return ""

    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        """
        return ""

    def format(self, records):
        """
        Format the specified records and return the result as a string.
        """
        rv = ""
        if len(records) > 0:
            rv = rv + self.formatHeader(records)
            for record in records:
                rv = rv + self.linefmt.format(record)
            rv = rv + self.formatFooter(records)
        return rv

#---------------------------------------------------------------------------
#   Filter classes and functions
#---------------------------------------------------------------------------

class Filter(object):
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter.

        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        self.nlen = len(name)

    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Returns True if the record should be logged, or False otherwise.
        If deemed appropriate, the record may be modified in-place.
        """
        if self.nlen == 0:
            return True
        elif self.name == record.name:
            return True
        elif record.name.find(self.name, 0, self.nlen) != 0:
            return False
        return (record.name[self.nlen] == ".")

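# Behaviour sketch for the hierarchy check above (the logger names are
# examples only):
#
#     >>> f = Filter('A.B')
#     >>> f.filter(makeLogRecord({'name': 'A.B.C'}))
#     True
#     >>> f.filter(makeLogRecord({'name': 'A.BB'}))
#     False
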
class Filterer(object):
    """
    A base class for loggers and handlers which allows them to share
    common code.
    """
    def __init__(self):
        """
        Initialize the list of filters to be an empty list.
        """
        self.filters = []

    def addFilter(self, filter):
        """
        Add the specified filter to this handler.
        """
        if not (filter in self.filters):
            self.filters.append(filter)

    def removeFilter(self, filter):
        """
        Remove the specified filter from this handler.
        """
        if filter in self.filters:
            self.filters.remove(filter)

    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        The default is to allow the record to be logged; any filter can veto
        this by returning a false value.
        If a filter attached to a handler returns a log record instance,
        then that instance is used in place of the original log record in
        any further processing of the event by that handler.
        If a filter returns any other true value, the original log record
        is used in any further processing of the event by that handler.

        If none of the filters return false values, this method returns
        a log record.
        If any of the filters return a false value, this method returns
        a false value.

        .. versionchanged:: 3.2

           Allow filters to be just callables.

        .. versionchanged:: 3.12
           Allow filters to return a LogRecord instead of
           modifying it in place.
        """
        for f in self.filters:
            if hasattr(f, 'filter'):
                result = f.filter(record)
            else:
                result = f(record) # assume callable - will raise if not
            if not result:
                return False
            if isinstance(result, LogRecord):
                record = result
        return record

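# Sketch of the callable-filter behaviour described above (the filter body and
# 'some_handler' are invented for the example):
#
#     def redact(record):
#         record.msg = str(record.msg).replace('secret', '***')
#         return record            # returning a LogRecord replaces the original
#
#     some_handler.addFilter(redact)   # 'some_handler' is an assumed Handler
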
#---------------------------------------------------------------------------
#   Handler classes and functions
#---------------------------------------------------------------------------

_handlers = weakref.WeakValueDictionary()  #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized

def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.
    """
    # This function can be called during module teardown, when globals are
    # set to None. It can also be called from another thread. So we need to
    # pre-emptively grab the necessary globals and check if they're None,
    # to prevent race conditions and failures during interpreter shutdown.
    handlers, lock = _handlerList, _lock
    if lock and handlers:
        with lock:
            try:
                handlers.remove(wr)
            except ValueError:
                pass

def _addHandlerRef(handler):
    """
    Add a handler to the internal cleanup list using a weak reference.
    """
    with _lock:
        _handlerList.append(weakref.ref(handler, _removeHandlerRef))


def getHandlerByName(name):
    """
    Get a handler with the specified *name*, or None if there isn't one with
    that name.
    """
    return _handlers.get(name)


def getHandlerNames():
    """
    Return all known handler names as an immutable set.
    """
    return frozenset(_handlers)

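# Handlers are registered here by name (via Handler.set_name below), so a
# configured handler can be looked up later. Illustrative only; the name
# 'console' is an assumption:
#
#     >>> h = StreamHandler()
#     >>> h.name = 'console'
#     >>> getHandlerByName('console') is h
#     True
#     >>> 'console' in getHandlerNames()
#     True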

class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self._name = None
        self.level = _checkLevel(level)
        self.formatter = None
        self._closed = False
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()

    def get_name(self):
        return self._name

    def set_name(self, name):
        with _lock:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self

    name = property(get_name, set_name)

    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        self.lock = threading.RLock()
        _register_at_fork_reinit_lock(self)

    def _at_fork_reinit(self):
        self.lock._at_fork_reinit()

    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()

    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()

    def setLevel(self, level):
        """
        Set the logging level of this handler.  level must be an int or a str.
        """
        self.level = _checkLevel(level)

    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)

    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')

    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock.

        Returns an instance of the log record that was emitted
        if it passed all filters, otherwise a false value is returned.
        """
        rv = self.filter(record)
        if isinstance(rv, LogRecord):
            record = rv
        if rv:
            with self.lock:
                self.emit(record)
        return rv

    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt

    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass

    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from an internal map of handlers,
        _handlers, which is used for handler lookup by name. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        with _lock:
            self._closed = True
            if self._name and self._name in _handlers:
                del _handlers[self._name]

    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions and sys.stderr:  # see issue 13807
            exc = sys.exception()
            try:
                sys.stderr.write('--- Logging error ---\n')
                traceback.print_exception(exc, limit=None, file=sys.stderr)
                sys.stderr.write('Call stack:\n')
                # Walk the stack frame up until we're out of logging,
                # so as to print the calling context.
                frame = exc.__traceback__.tb_frame
                while (frame and os.path.dirname(frame.f_code.co_filename) ==
                       __path__[0]):
                    frame = frame.f_back
                if frame:
                    traceback.print_stack(frame, file=sys.stderr)
                else:
                    # couldn't find the right stack frame, for some reason
                    sys.stderr.write('Logged from file %s, line %s\n' % (
                                     record.filename, record.lineno))
                # Issue 18671: output logging message and arguments
                try:
                    sys.stderr.write('Message: %r\n'
                                     'Arguments: %s\n' % (record.msg,
                                                          record.args))
                except RecursionError:  # See issue 36272
                    raise
                except Exception:
                    sys.stderr.write('Unable to print the message and arguments'
                                     ' - possible formatting error.\nUse the'
                                     ' traceback above to help find the error.\n'
                                    )
            except OSError: #pragma: no cover
                pass    # see issue 5971
            finally:
                del exc

    def __repr__(self):
        level = getLevelName(self.level)
        return '<%s (%s)>' % (self.__class__.__name__, level)

class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """

    terminator = '\n'

    def __init__(self, stream=None):
        """
        Initialize the handler.

        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream

    def flush(self):
        """
        Flushes the stream.
        """
        with self.lock:
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()

    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline.  If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream.  If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            # issue 35046: merged two stream.writes into one.
            stream.write(msg + self.terminator)
            self.flush()
        except RecursionError:  # See issue 36272
            raise
        except Exception:
            self.handleError(record)

    def setStream(self, stream):
        """
        Sets the StreamHandler's stream to the specified value,
        if it is different.

        Returns the old stream, if the stream was changed, or None
        if it wasn't.
        """
        if stream is self.stream:
            result = None
        else:
            result = self.stream
            with self.lock:
                self.flush()
                self.stream = stream
        return result

    def __repr__(self):
        level = getLevelName(self.level)
        name = getattr(self.stream, 'name', '')
        #  bpo-36015: name can be an int
        name = str(name)
        if name:
            name += ' '
        return '<%s %s(%s)>' % (self.__class__.__name__, name, level)

    __class_getitem__ = classmethod(GenericAlias)

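# A minimal sketch of wiring a StreamHandler to a formatter; the stream and
# format string are example choices:
#
#     handler = StreamHandler(sys.stdout)
#     handler.setFormatter(Formatter(BASIC_FORMAT))
#     handler.setLevel(INFO)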

class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.
        """
        # Issue #27493: add support for Path objects to be passed in
        filename = os.fspath(filename)
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        if "b" not in mode:
            self.encoding = io.text_encoding(encoding)
        self.errors = errors
        self.delay = delay
        # bpo-26789: FileHandler keeps a reference to the builtin open()
        # function to be able to open or reopen the file during Python
        # finalization.
        self._builtin_open = open
        if delay:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())

    def close(self):
        """
        Closes the stream.
        """
        with self.lock:
            try:
                if self.stream:
                    try:
                        self.flush()
                    finally:
                        stream = self.stream
                        self.stream = None
                        if hasattr(stream, "close"):
                            stream.close()
            finally:
                # Issue #19523: call unconditionally to
                # prevent a handler leak when delay is set
                # Also see Issue #42378: we also rely on
                # self._closed being set to True there
                StreamHandler.close(self)

    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        open_func = self._builtin_open
        return open_func(self.baseFilename, self.mode,
                         encoding=self.encoding, errors=self.errors)

    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.

        If stream is not open, current mode is 'w' and `_closed=True`, record
        will not be emitted (see Issue #42378).
        """
        if self.stream is None:
            if self.mode != 'w' or not self._closed:
                self.stream = self._open()
        if self.stream:
            StreamHandler.emit(self, record)

    def __repr__(self):
        level = getLevelName(self.level)
        return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)

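# Illustration of the 'delay' flag discussed above (the path is an example):
# the file is only opened on the first emitted record, not at construction.
#
#     fh = FileHandler('/tmp/app.log', mode='a', encoding='utf-8', delay=True)
#     # fh.stream is None until the first record is handled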

class _StderrHandler(StreamHandler):
    """
    This class is like a StreamHandler using sys.stderr, but always uses
    whatever sys.stderr is currently set to rather than the value of
    sys.stderr at handler construction time.
    """
    def __init__(self, level=NOTSET):
        """
        Initialize the handler.
        """
        Handler.__init__(self, level)

    @property
    def stream(self):
        return sys.stderr


_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort

#---------------------------------------------------------------------------
#   Manager classes and functions
#---------------------------------------------------------------------------

class PlaceHolder(object):
    """
    PlaceHolder instances are used in the Manager logger hierarchy to take
    the place of nodes for which no loggers have been defined. This class is
    intended for internal use only and not as part of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with the specified logger being a child of this placeholder.
        """
        self.loggerMap = { alogger : None }

    def append(self, alogger):
        """
        Add the specified logger as a child of this placeholder.
        """
        if alogger not in self.loggerMap:
            self.loggerMap[alogger] = None

#
#   Determine which class to use when instantiating loggers.
#

def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__()
    """
    if klass != Logger:
        if not issubclass(klass, Logger):
            raise TypeError("logger not derived from logging.Logger: "
                            + klass.__name__)
    global _loggerClass
    _loggerClass = klass

def getLoggerClass():
    """
    Return the class to be used when instantiating a logger.
    """
    return _loggerClass

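# Sketch of installing a Logger subclass so that subsequently created loggers
# use it; the subclass and its extra method are invented for the example:
#
#     class AuditLogger(Logger):
#         def audit(self, msg, *args, **kwargs):
#             self.log(INFO, msg, *args, **kwargs)
#
#     setLoggerClass(AuditLogger)
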
class Manager(object):
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        self.disable = 0
        self.emittedNoHandlerWarning = False
        self.loggerDict = {}
        self.loggerClass = None
        self.logRecordFactory = None

    @property
    def disable(self):
        return self._disable

    @disable.setter
    def disable(self, value):
        self._disable = _checkLevel(value)

    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        if not isinstance(name, str):
            raise TypeError('A logger name must be a string')
        with _lock:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        return rv

    def setLoggerClass(self, klass):
        """
        Set the class to be used when instantiating a logger with this Manager.
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        self.loggerClass = klass

    def setLogRecordFactory(self, factory):
        """
        Set the factory to be used when instantiating a log record with this
        Manager.
        """
        self.logRecordFactory = factory

    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            rv = self.root
        alogger.parent = rv

    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(name)
1442            if c.parent.name[:namelen] != name:
1443                alogger.parent = c.parent
1444                c.parent = alogger
1445
1446    def _clear_cache(self):
1447        """
1448        Clear the cache for all loggers in loggerDict
1449        Called when level changes are made
1450        """
1451
1452        with _lock:
1453            for logger in self.loggerDict.values():
1454                if isinstance(logger, Logger):
1455                    logger._cache.clear()
1456            self.root._cache.clear()
1457
1458#---------------------------------------------------------------------------
1459#   Logger classes and functions
1460#---------------------------------------------------------------------------
1461
1462class Logger(Filterer):
1463    """
1464    Instances of the Logger class represent a single logging channel. A
1465    "logging channel" indicates an area of an application. Exactly how an
1466    "area" is defined is up to the application developer. Since an
1467    application can have any number of areas, logging channels are identified
1468    by a unique string. Application areas can be nested (e.g. an area
1469    of "input processing" might include sub-areas "read CSV files", "read
1470    XLS files" and "read Gnumeric files"). To cater for this natural nesting,
1471    channel names are organized into a namespace hierarchy where levels are
1472    separated by periods, much like the Java or Python package namespace. So
1473    in the instance given above, channel names might be "input" for the upper
1474    level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
1475    There is no arbitrary limit to the depth of nesting.
1476    """
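    # A short usage sketch of the naming hierarchy described above (the names
    # are illustrative only):
    #
    #   import logging
    #   input_log = logging.getLogger("input")
    #   csv_log = logging.getLogger("input.csv")  # child of the "input" logger
    #   assert csv_log.parent is input_log
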
1477    def __init__(self, name, level=NOTSET):
1478        """
1479        Initialize the logger with a name and an optional level.
1480        """
1481        Filterer.__init__(self)
1482        self.name = name
1483        self.level = _checkLevel(level)
1484        self.parent = None
1485        self.propagate = True
1486        self.handlers = []
1487        self.disabled = False
1488        self._cache = {}
1489
1490    def setLevel(self, level):
1491        """
1492        Set the logging level of this logger.  level must be an int or a str.
1493        """
1494        self.level = _checkLevel(level)
1495        self.manager._clear_cache()
1496
1497    def debug(self, msg, *args, **kwargs):
1498        """
1499        Log 'msg % args' with severity 'DEBUG'.
1500
1501        To pass exception information, use the keyword argument exc_info with
1502        a true value, e.g.
1503
1504        logger.debug("Houston, we have a %s", "thorny problem", exc_info=True)
1505        """
1506        if self.isEnabledFor(DEBUG):
1507            self._log(DEBUG, msg, args, **kwargs)
1508
1509    def info(self, msg, *args, **kwargs):
1510        """
1511        Log 'msg % args' with severity 'INFO'.
1512
1513        To pass exception information, use the keyword argument exc_info with
1514        a true value, e.g.
1515
1516        logger.info("Houston, we have a %s", "notable problem", exc_info=True)
1517        """
1518        if self.isEnabledFor(INFO):
1519            self._log(INFO, msg, args, **kwargs)
1520
1521    def warning(self, msg, *args, **kwargs):
1522        """
1523        Log 'msg % args' with severity 'WARNING'.
1524
1525        To pass exception information, use the keyword argument exc_info with
1526        a true value, e.g.
1527
1528        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True)
1529        """
1530        if self.isEnabledFor(WARNING):
1531            self._log(WARNING, msg, args, **kwargs)
1532
1533    def warn(self, msg, *args, **kwargs):
1534        warnings.warn("The 'warn' method is deprecated, "
1535            "use 'warning' instead", DeprecationWarning, 2)
1536        self.warning(msg, *args, **kwargs)
1537
1538    def error(self, msg, *args, **kwargs):
1539        """
1540        Log 'msg % args' with severity 'ERROR'.
1541
1542        To pass exception information, use the keyword argument exc_info with
1543        a true value, e.g.
1544
1545        logger.error("Houston, we have a %s", "major problem", exc_info=True)
1546        """
1547        if self.isEnabledFor(ERROR):
1548            self._log(ERROR, msg, args, **kwargs)
1549
1550    def exception(self, msg, *args, exc_info=True, **kwargs):
1551        """
1552        Convenience method for logging an ERROR with exception information.
1553        """
1554        self.error(msg, *args, exc_info=exc_info, **kwargs)
1555
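    # A minimal sketch of typical use, assuming 'logger' is an already
    # configured Logger instance:
    #
    #   try:
    #       1 / 0
    #   except ZeroDivisionError:
    #       logger.exception("division failed")  # ERROR record plus traceback
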
1556    def critical(self, msg, *args, **kwargs):
1557        """
1558        Log 'msg % args' with severity 'CRITICAL'.
1559
1560        To pass exception information, use the keyword argument exc_info with
1561        a true value, e.g.
1562
1563        logger.critical("Houston, we have a %s", "major disaster", exc_info=True)
1564        """
1565        if self.isEnabledFor(CRITICAL):
1566            self._log(CRITICAL, msg, args, **kwargs)
1567
1568    def fatal(self, msg, *args, **kwargs):
1569        """
1570        Don't use this method, use critical() instead.
1571        """
1572        self.critical(msg, *args, **kwargs)
1573
1574    def log(self, level, msg, *args, **kwargs):
1575        """
1576        Log 'msg % args' with the integer severity 'level'.
1577
1578        To pass exception information, use the keyword argument exc_info with
1579        a true value, e.g.
1580
1581        logger.log(level, "We have a %s", "mysterious problem", exc_info=True)
1582        """
1583        if not isinstance(level, int):
1584            if raiseExceptions:
1585                raise TypeError("level must be an integer")
1586            else:
1587                return
1588        if self.isEnabledFor(level):
1589            self._log(level, msg, args, **kwargs)
1590
1591    def findCaller(self, stack_info=False, stacklevel=1):
1592        """
1593        Find the stack frame of the caller so that we can note the source
1594        file name, line number and function name.
1595        """
1596        f = currentframe()
1597        #On some versions of IronPython, currentframe() returns None if
1598        #IronPython isn't run with -X:Frames.
1599        if f is None:
1600            return "(unknown file)", 0, "(unknown function)", None
1601        while stacklevel > 0:
1602            next_f = f.f_back
1603            if next_f is None:
1604                ## We've got options here.
1605                ## If we want to use the last (deepest) frame:
1606                break
1607                ## If we want to mimic the warnings module:
1608                #return ("sys", 1, "(unknown function)", None)
1609                ## If we want to be pedantic:
1610                #raise ValueError("call stack is not deep enough")
1611            f = next_f
1612            if not _is_internal_frame(f):
1613                stacklevel -= 1
1614        co = f.f_code
1615        sinfo = None
1616        if stack_info:
1617            with io.StringIO() as sio:
1618                sio.write("Stack (most recent call last):\n")
1619                traceback.print_stack(f, file=sio)
1620                sinfo = sio.getvalue()
1621                if sinfo[-1] == '\n':
1622                    sinfo = sinfo[:-1]
1623        return co.co_filename, f.f_lineno, co.co_name, sinfo
1624
1625    def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
1626                   func=None, extra=None, sinfo=None):
1627        """
1628        A factory method which can be overridden in subclasses to create
1629        specialized LogRecords.
1630        """
1631        rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
1632                             sinfo)
1633        if extra is not None:
1634            for key in extra:
1635                if (key in ["message", "asctime"]) or (key in rv.__dict__):
1636                    raise KeyError("Attempt to overwrite %r in LogRecord" % key)
1637                rv.__dict__[key] = extra[key]
1638        return rv
1639
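    # A hedged sketch of overriding makeRecord in a Logger subclass to attach
    # a custom attribute to every record ('TaggedLogger' and the attribute
    # name are illustrative):
    #
    #   class TaggedLogger(logging.Logger):
    #       def makeRecord(self, *args, **kwargs):
    #           rv = super().makeRecord(*args, **kwargs)
    #           rv.tag = "my-service"    # available to formats as %(tag)s
    #           return rv
    #
    #   logging.setLoggerClass(TaggedLogger)  # new loggers use the subclass
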
1640    def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False,
1641             stacklevel=1):
1642        """
1643        Low-level logging routine which creates a LogRecord and then calls
1644        all the handlers of this logger to handle the record.
1645        """
1646        sinfo = None
1647        if _srcfile:
1648            #IronPython doesn't track Python frames, so findCaller raises an
1649            #exception on some versions of IronPython. We trap it here so that
1650            #IronPython can use logging.
1651            try:
1652                fn, lno, func, sinfo = self.findCaller(stack_info, stacklevel)
1653            except ValueError: # pragma: no cover
1654                fn, lno, func = "(unknown file)", 0, "(unknown function)"
1655        else: # pragma: no cover
1656            fn, lno, func = "(unknown file)", 0, "(unknown function)"
1657        if exc_info:
1658            if isinstance(exc_info, BaseException):
1659                exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
1660            elif not isinstance(exc_info, tuple):
1661                exc_info = sys.exc_info()
1662        record = self.makeRecord(self.name, level, fn, lno, msg, args,
1663                                 exc_info, func, extra, sinfo)
1664        self.handle(record)
1665
1666    def handle(self, record):
1667        """
1668        Call the handlers for the specified record.
1669
1670        This method is used for unpickled records received from a socket, as
1671        well as those created locally. Logger-level filtering is applied.
1672        """
1673        if self.disabled:
1674            return
1675        maybe_record = self.filter(record)
1676        if not maybe_record:
1677            return
1678        if isinstance(maybe_record, LogRecord):
1679            record = maybe_record
1680        self.callHandlers(record)
1681
1682    def addHandler(self, hdlr):
1683        """
1684        Add the specified handler to this logger.
1685        """
1686        with _lock:
1687            if not (hdlr in self.handlers):
1688                self.handlers.append(hdlr)
1689
1690    def removeHandler(self, hdlr):
1691        """
1692        Remove the specified handler from this logger.
1693        """
1694        with _lock:
1695            if hdlr in self.handlers:
1696                self.handlers.remove(hdlr)
1697
1698    def hasHandlers(self):
1699        """
1700        See if this logger has any handlers configured.
1701
1702        Loop through all handlers for this logger and its parents in the
1703        logger hierarchy. Return True if a handler was found, else False.
1704        Stop searching up the hierarchy whenever a logger with the "propagate"
1705        attribute set to zero is found - that will be the last logger which
1706        is checked for the existence of handlers.
1707        """
1708        c = self
1709        rv = False
1710        while c:
1711            if c.handlers:
1712                rv = True
1713                break
1714            if not c.propagate:
1715                break
1716            else:
1717                c = c.parent
1718        return rv
1719
1720    def callHandlers(self, record):
1721        """
1722        Pass a record to all relevant handlers.
1723
1724        Loop through all handlers for this logger and its parents in the
1725        logger hierarchy. If no handler was found, output a one-off error
1726        message to sys.stderr. Stop searching up the hierarchy whenever a
1727        logger with the "propagate" attribute set to zero is found - that
1728        will be the last logger whose handlers are called.
1729        """
1730        c = self
1731        found = 0
1732        while c:
1733            for hdlr in c.handlers:
1734                found = found + 1
1735                if record.levelno >= hdlr.level:
1736                    hdlr.handle(record)
1737            if not c.propagate:
1738                c = None    #break out
1739            else:
1740                c = c.parent
1741        if (found == 0):
1742            if lastResort:
1743                if record.levelno >= lastResort.level:
1744                    lastResort.handle(record)
1745            elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
1746                sys.stderr.write("No handlers could be found for logger"
1747                                 " \"%s\"\n" % self.name)
1748                self.manager.emittedNoHandlerWarning = True
1749
1750    def getEffectiveLevel(self):
1751        """
1752        Get the effective level for this logger.
1753
1754        Loop through this logger and its parents in the logger hierarchy,
1755        looking for a non-zero logging level. Return the first one found.
1756        """
1757        logger = self
1758        while logger:
1759            if logger.level:
1760                return logger.level
1761            logger = logger.parent
1762        return NOTSET
1763
1764    def isEnabledFor(self, level):
1765        """
1766        Is this logger enabled for level 'level'?
1767        """
1768        if self.disabled:
1769            return False
1770
1771        try:
1772            return self._cache[level]
1773        except KeyError:
1774            with _lock:
1775                if self.manager.disable >= level:
1776                    is_enabled = self._cache[level] = False
1777                else:
1778                    is_enabled = self._cache[level] = (
1779                        level >= self.getEffectiveLevel()
1780                    )
1781            return is_enabled
1782
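    # Typical guard pattern (illustrative; 'expensive_repr' and 'obj' are
    # hypothetical) for skipping costly message construction when the level
    # is disabled:
    #
    #   if logger.isEnabledFor(logging.DEBUG):
    #       logger.debug("state dump: %s", expensive_repr(obj))
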
1783    def getChild(self, suffix):
1784        """
1785        Get a logger which is a descendant to this one.
1786
1787        This is a convenience method, such that
1788
1789        logging.getLogger('abc').getChild('def.ghi')
1790
1791        is the same as
1792
1793        logging.getLogger('abc.def.ghi')
1794
1795        It's useful, for example, when the parent logger is named using
1796        __name__ rather than a literal string.
1797        """
1798        if self.root is not self:
1799            suffix = '.'.join((self.name, suffix))
1800        return self.manager.getLogger(suffix)
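
    # For example (illustrative), a package whose top-level logger is named
    # via __name__ can derive sub-loggers without hard-coding the prefix:
    #
    #   base = logging.getLogger(__name__)
    #   sub = base.getChild("worker")  # same as getLogger(__name__ + ".worker")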
1801
1802    def getChildren(self):
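        """
        Return the set of loggers which are immediate children of this logger.
        """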
1803
1804        def _hierlevel(logger):
1805            if logger is logger.manager.root:
1806                return 0
1807            return 1 + logger.name.count('.')
1808
1809        d = self.manager.loggerDict
1810        with _lock:
1811            # exclude PlaceHolders - the last check is to ensure that lower-level
1812            # descendants aren't returned - if there are placeholders, a logger's
1813            # parent field might point to a grandparent or ancestor thereof.
1814            return set(item for item in d.values()
1815                       if isinstance(item, Logger) and item.parent is self and
1816                       _hierlevel(item) == 1 + _hierlevel(item.parent))
1817
1818    def __repr__(self):
1819        level = getLevelName(self.getEffectiveLevel())
1820        return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level)
1821
1822    def __reduce__(self):
1823        if getLogger(self.name) is not self:
1824            import pickle
1825            raise pickle.PicklingError('logger cannot be pickled')
1826        return getLogger, (self.name,)
1827
1828
1829class RootLogger(Logger):
1830    """
1831    A root logger is not that different to any other logger, except that
1832    it must have a logging level and there is only one instance of it in
1833    the hierarchy.
1834    """
1835    def __init__(self, level):
1836        """
1837        Initialize the logger with the name "root".
1838        """
1839        Logger.__init__(self, "root", level)
1840
1841    def __reduce__(self):
1842        return getLogger, ()
1843
1844_loggerClass = Logger
1845
1846class LoggerAdapter(object):
1847    """
1848    An adapter for loggers which makes it easier to specify contextual
1849    information in logging output.
1850    """
1851
1852    def __init__(self, logger, extra=None, merge_extra=False):
1853        """
1854        Initialize the adapter with a logger and a dict-like object which
1855        provides contextual information. This constructor signature allows
1856        easy stacking of LoggerAdapters, if so desired.
1857
1858        You can effectively pass keyword arguments as shown in the
1859        following example:
1860
1861        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
1862
1863        By default, LoggerAdapter objects will drop the "extra" argument
1864        passed on individual log calls and use the adapter's own instead.
1865
1866        Initializing the adapter with merge_extra=True will instead merge
1867        both maps when logging, with the individual call's extra taking
1868        precedence over the LoggerAdapter instance's extra.
1869
1870        .. versionchanged:: 3.13
1871           The *merge_extra* argument was added.
1872        """
1873        self.logger = logger
1874        self.extra = extra
1875        self.merge_extra = merge_extra
1876
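    # An illustrative sketch of the merge_extra behaviour described above
    # (requires Python 3.13+; 'someLogger' is assumed to be a Logger):
    #
    #   adapter = LoggerAdapter(someLogger, {"app": "demo"}, merge_extra=True)
    #   adapter.info("hi", extra={"request_id": 42})  # record carries both keys
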
1877    def process(self, msg, kwargs):
1878        """
1879        Process the logging message and keyword arguments passed in to
1880        a logging call to insert contextual information. You can either
1881        manipulate the message itself, the keyword args or both. Return
1882        the message and kwargs modified (or not) to suit your needs.
1883
1884        Normally, you'll only need to override this one method in a
1885        LoggerAdapter subclass for your specific needs.
1886        """
1887        if self.merge_extra and "extra" in kwargs:
1888            kwargs["extra"] = {**self.extra, **kwargs["extra"]}
1889        else:
1890            kwargs["extra"] = self.extra
1891        return msg, kwargs
1892
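    # A hedged sketch of the override described above; the adapter class,
    # logger name and 'conn_id' key are illustrative:
    #
    #   import logging
    #
    #   class CtxAdapter(logging.LoggerAdapter):
    #       def process(self, msg, kwargs):
    #           return "[%s] %s" % (self.extra["conn_id"], msg), kwargs
    #
    #   adapter = CtxAdapter(logging.getLogger("net"), {"conn_id": "abc123"})
    #   adapter.info("connected")  # message becomes "[abc123] connected"
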
1893    #
1894    # Boilerplate convenience methods
1895    #
1896    def debug(self, msg, *args, **kwargs):
1897        """
1898        Delegate a debug call to the underlying logger.
1899        """
1900        self.log(DEBUG, msg, *args, **kwargs)
1901
1902    def info(self, msg, *args, **kwargs):
1903        """
1904        Delegate an info call to the underlying logger.
1905        """
1906        self.log(INFO, msg, *args, **kwargs)
1907
1908    def warning(self, msg, *args, **kwargs):
1909        """
1910        Delegate a warning call to the underlying logger.
1911        """
1912        self.log(WARNING, msg, *args, **kwargs)
1913
1914    def warn(self, msg, *args, **kwargs):
1915        warnings.warn("The 'warn' method is deprecated, "
1916            "use 'warning' instead", DeprecationWarning, 2)
1917        self.warning(msg, *args, **kwargs)
1918
1919    def error(self, msg, *args, **kwargs):
1920        """
1921        Delegate an error call to the underlying logger.
1922        """
1923        self.log(ERROR, msg, *args, **kwargs)
1924
1925    def exception(self, msg, *args, exc_info=True, **kwargs):
1926        """
1927        Delegate an exception call to the underlying logger.
1928        """
1929        self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)
1930
1931    def critical(self, msg, *args, **kwargs):
1932        """
1933        Delegate a critical call to the underlying logger.
1934        """
1935        self.log(CRITICAL, msg, *args, **kwargs)
1936
1937    def log(self, level, msg, *args, **kwargs):
1938        """
1939        Delegate a log call to the underlying logger, after adding
1940        contextual information from this adapter instance.
1941        """
1942        if self.isEnabledFor(level):
1943            msg, kwargs = self.process(msg, kwargs)
1944            self.logger.log(level, msg, *args, **kwargs)
1945
1946    def isEnabledFor(self, level):
1947        """
1948        Is this logger enabled for level 'level'?
1949        """
1950        return self.logger.isEnabledFor(level)
1951
1952    def setLevel(self, level):
1953        """
1954        Set the specified level on the underlying logger.
1955        """
1956        self.logger.setLevel(level)
1957
1958    def getEffectiveLevel(self):
1959        """
1960        Get the effective level for the underlying logger.
1961        """
1962        return self.logger.getEffectiveLevel()
1963
1964    def hasHandlers(self):
1965        """
1966        See if the underlying logger has any handlers.
1967        """
1968        return self.logger.hasHandlers()
1969
1970    def _log(self, level, msg, args, **kwargs):
1971        """
1972        Low-level log implementation, proxied to allow nested logger adapters.
1973        """
1974        return self.logger._log(level, msg, args, **kwargs)
1975
1976    @property
1977    def manager(self):
1978        return self.logger.manager
1979
1980    @manager.setter
1981    def manager(self, value):
1982        self.logger.manager = value
1983
1984    @property
1985    def name(self):
1986        return self.logger.name
1987
1988    def __repr__(self):
1989        logger = self.logger
1990        level = getLevelName(logger.getEffectiveLevel())
1991        return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level)
1992
1993    __class_getitem__ = classmethod(GenericAlias)
1994
1995root = RootLogger(WARNING)
1996Logger.root = root
1997Logger.manager = Manager(Logger.root)
1998
1999#---------------------------------------------------------------------------
2000# Configuration classes and functions
2001#---------------------------------------------------------------------------
2002
2003def basicConfig(**kwargs):
2004    """
2005    Do basic configuration for the logging system.
2006
2007    This function does nothing if the root logger already has handlers
2008    configured, unless the keyword argument *force* is set to ``True``.
2009    It is a convenience method intended for use by simple scripts
2010    to do one-shot configuration of the logging package.
2011
2012    The default behaviour is to create a StreamHandler which writes to
2013    sys.stderr, set a formatter using the BASIC_FORMAT format string, and
2014    add the handler to the root logger.
2015
2016    A number of optional keyword arguments may be specified, which can alter
2017    the default behaviour.
2018
2019    filename  Specifies that a FileHandler be created, using the specified
2020              filename, rather than a StreamHandler.
2021    filemode  Specifies the mode to open the file, if filename is specified
2022              (if filemode is unspecified, it defaults to 'a').
2023    format    Use the specified format string for the handler.
2024    datefmt   Use the specified date/time format.
2025    style     If a format string is specified, use this to specify the
2026              type of format string (possible values '%', '{', '$', for
2027              %-formatting, :meth:`str.format` and :class:`string.Template`
2028              - defaults to '%').
2029    level     Set the root logger level to the specified level.
2030    stream    Use the specified stream to initialize the StreamHandler. Note
2031              that this argument is incompatible with 'filename' - if both
2032              are present, 'stream' is ignored.
2033    handlers  If specified, this should be an iterable of already created
2034              handlers, which will be added to the root logger. Any handler
2035              in the list which does not have a formatter assigned will be
2036              assigned the formatter created in this function.
2037    force     If this keyword is specified as true, any existing handlers
2038              attached to the root logger are removed and closed, before
2039              carrying out the configuration as specified by the other
2040              arguments.
2041    encoding  If specified together with a filename, this encoding is passed to
2042              the created FileHandler, causing it to be used when the file is
2043              opened.
2044    errors    If specified together with a filename, this value is passed to the
2045              created FileHandler, causing it to be used when the file is
2046              opened in text mode. If not specified, the default value is
2047              `backslashreplace`.
2048
2049    Note that you could specify a stream created using open(filename, mode)
2050    rather than passing the filename and mode in. However, it should be
2051    remembered that StreamHandler does not close its stream (since it may be
2052    using sys.stdout or sys.stderr), whereas FileHandler closes its stream
2053    when the handler is closed.
2054
2055    .. versionchanged:: 3.2
2056       Added the ``style`` parameter.
2057
2058    .. versionchanged:: 3.3
2059       Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
2060       incompatible arguments (e.g. ``handlers`` specified together with
2061       ``filename``/``filemode``, or ``filename``/``filemode`` specified
2062       together with ``stream``, or ``handlers`` specified together with
2063       ``stream``).
2064
2065    .. versionchanged:: 3.8
2066       Added the ``force`` parameter.
2067
2068    .. versionchanged:: 3.9
2069       Added the ``encoding`` and ``errors`` parameters.
2070    """
2071    # Add thread safety in case someone mistakenly calls
2072    # basicConfig() from multiple threads
2073    with _lock:
2074        force = kwargs.pop('force', False)
2075        encoding = kwargs.pop('encoding', None)
2076        errors = kwargs.pop('errors', 'backslashreplace')
2077        if force:
2078            for h in root.handlers[:]:
2079                root.removeHandler(h)
2080                h.close()
2081        if len(root.handlers) == 0:
2082            handlers = kwargs.pop("handlers", None)
2083            if handlers is None:
2084                if "stream" in kwargs and "filename" in kwargs:
2085                    raise ValueError("'stream' and 'filename' should not be "
2086                                     "specified together")
2087            else:
2088                if "stream" in kwargs or "filename" in kwargs:
2089                    raise ValueError("'stream' or 'filename' should not be "
2090                                     "specified together with 'handlers'")
2091            if handlers is None:
2092                filename = kwargs.pop("filename", None)
2093                mode = kwargs.pop("filemode", 'a')
2094                if filename:
2095                    if 'b' in mode:
2096                        errors = None
2097                    else:
2098                        encoding = io.text_encoding(encoding)
2099                    h = FileHandler(filename, mode,
2100                                    encoding=encoding, errors=errors)
2101                else:
2102                    stream = kwargs.pop("stream", None)
2103                    h = StreamHandler(stream)
2104                handlers = [h]
2105            dfs = kwargs.pop("datefmt", None)
2106            style = kwargs.pop("style", '%')
2107            if style not in _STYLES:
2108                raise ValueError('Style must be one of: %s' % ','.join(
2109                                 _STYLES.keys()))
2110            fs = kwargs.pop("format", _STYLES[style][1])
2111            fmt = Formatter(fs, dfs, style)
2112            for h in handlers:
2113                if h.formatter is None:
2114                    h.setFormatter(fmt)
2115                root.addHandler(h)
2116            level = kwargs.pop("level", None)
2117            if level is not None:
2118                root.setLevel(level)
2119            if kwargs:
2120                keys = ', '.join(kwargs.keys())
2121                raise ValueError('Unrecognised argument(s): %s' % keys)
2122
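# A minimal usage sketch of basicConfig (the file name, level and format are
# illustrative values):
#
#   import logging
#   logging.basicConfig(
#       filename="app.log",            # creates a FileHandler instead of stderr
#       level=logging.INFO,
#       format="%(asctime)s %(levelname)s %(name)s: %(message)s",
#   )
#   logging.info("started")
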
2123#---------------------------------------------------------------------------
2124# Utility functions at module level.
2125# Basically delegate everything to the root logger.
2126#---------------------------------------------------------------------------
2127
2128def getLogger(name=None):
2129    """
2130    Return a logger with the specified name, creating it if necessary.
2131
2132    If no name is specified, return the root logger.
2133    """
2134    if not name or isinstance(name, str) and name == root.name:
2135        return root
2136    return Logger.manager.getLogger(name)
2137
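# A common pattern (illustrative) is one logger per module, named after it:
#
#   import logging
#   logger = logging.getLogger(__name__)
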
2138def critical(msg, *args, **kwargs):
2139    """
2140    Log a message with severity 'CRITICAL' on the root logger. If the logger
2141    has no handlers, call basicConfig() to add a console handler with a
2142    pre-defined format.
2143    """
2144    if len(root.handlers) == 0:
2145        basicConfig()
2146    root.critical(msg, *args, **kwargs)
2147
2148def fatal(msg, *args, **kwargs):
2149    """
2150    Don't use this function, use critical() instead.
2151    """
2152    critical(msg, *args, **kwargs)
2153
2154def error(msg, *args, **kwargs):
2155    """
2156    Log a message with severity 'ERROR' on the root logger. If the logger has
2157    no handlers, call basicConfig() to add a console handler with a pre-defined
2158    format.
2159    """
2160    if len(root.handlers) == 0:
2161        basicConfig()
2162    root.error(msg, *args, **kwargs)
2163
2164def exception(msg, *args, exc_info=True, **kwargs):
2165    """
2166    Log a message with severity 'ERROR' on the root logger, with exception
2167    information. If the logger has no handlers, basicConfig() is called to add
2168    a console handler with a pre-defined format.
2169    """
2170    error(msg, *args, exc_info=exc_info, **kwargs)
2171
2172def warning(msg, *args, **kwargs):
2173    """
2174    Log a message with severity 'WARNING' on the root logger. If the logger has
2175    no handlers, call basicConfig() to add a console handler with a pre-defined
2176    format.
2177    """
2178    if len(root.handlers) == 0:
2179        basicConfig()
2180    root.warning(msg, *args, **kwargs)
2181
2182def warn(msg, *args, **kwargs):
2183    warnings.warn("The 'warn' function is deprecated, "
2184        "use 'warning' instead", DeprecationWarning, 2)
2185    warning(msg, *args, **kwargs)
2186
2187def info(msg, *args, **kwargs):
2188    """
2189    Log a message with severity 'INFO' on the root logger. If the logger has
2190    no handlers, call basicConfig() to add a console handler with a pre-defined
2191    format.
2192    """
2193    if len(root.handlers) == 0:
2194        basicConfig()
2195    root.info(msg, *args, **kwargs)
2196
2197def debug(msg, *args, **kwargs):
2198    """
2199    Log a message with severity 'DEBUG' on the root logger. If the logger has
2200    no handlers, call basicConfig() to add a console handler with a pre-defined
2201    format.
2202    """
2203    if len(root.handlers) == 0:
2204        basicConfig()
2205    root.debug(msg, *args, **kwargs)
2206
2207def log(level, msg, *args, **kwargs):
2208    """
2209    Log 'msg % args' with the integer severity 'level' on the root logger. If
2210    the logger has no handlers, call basicConfig() to add a console handler
2211    with a pre-defined format.
2212    """
2213    if len(root.handlers) == 0:
2214        basicConfig()
2215    root.log(level, msg, *args, **kwargs)
2216
2217def disable(level=CRITICAL):
2218    """
2219    Disable all logging calls of severity 'level' and below.
2220    """
2221    root.manager.disable = level
2222    root.manager._clear_cache()
2223
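# For example (illustrative), suppress INFO and everything below it globally:
#
#   logging.disable(logging.INFO)  # WARNING and above still get through
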
2224def shutdown(handlerList=_handlerList):
2225    """
2226    Perform any cleanup actions in the logging system (e.g. flushing
2227    buffers).
2228
2229    Should be called at application exit.
2230    """
2231    for wr in reversed(handlerList[:]):
2232        #errors might occur, for example, if files are locked
2233        #we just ignore them if raiseExceptions is not set
2234        try:
2235            h = wr()
2236            if h:
2237                try:
2238                    h.acquire()
2239                    # MemoryHandlers might not want to be flushed on close,
2240                    # but circular imports prevent us from scoping this to
2241                    # just those handlers.  Hence the default of True.
2242                    if getattr(h, 'flushOnClose', True):
2243                        h.flush()
2244                    h.close()
2245                except (OSError, ValueError):
2246                    # Ignore errors which might be caused
2247                    # because handlers have been closed but
2248                    # references to them are still around at
2249                    # application exit.
2250                    pass
2251                finally:
2252                    h.release()
2253        except: # ignore everything, as we're shutting down
2254            if raiseExceptions:
2255                raise
2256            #else, swallow
2257
2258#Let's try to shut down automatically on application exit...
2259import atexit
2260atexit.register(shutdown)
2261
2262# Null handler
2263
2264class NullHandler(Handler):
2265    """
2266    This handler does nothing. It's intended to be used to avoid the
2267    "No handlers could be found for logger XXX" one-off warning. This is
2268    important for library code, which may contain code to log events. If a user
2269    of the library does not configure logging, the one-off warning might be
2270    produced; to avoid this, the library developer simply needs to instantiate
2271    a NullHandler and add it to the top-level logger of the library module or
2272    package.
2273    """
2274    def handle(self, record):
2275        """Stub."""
2276
2277    def emit(self, record):
2278        """Stub."""
2279
2280    def createLock(self):
2281        self.lock = None
2282
2283    def _at_fork_reinit(self):
2284        pass
2285
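# The library-code pattern described in the NullHandler docstring, as a short
# sketch (the package name "mylib" is illustrative):
#
#   import logging
#   logging.getLogger("mylib").addHandler(logging.NullHandler())
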
2286# Warnings integration
2287
2288_warnings_showwarning = None
2289
2290def _showwarning(message, category, filename, lineno, file=None, line=None):
2291    """
2292    Implementation of showwarning which redirects to logging. It will first
2293    check to see if the file parameter is None. If a file is specified, it will
2294    delegate to the original warnings implementation of showwarning. Otherwise,
2295    it will call warnings.formatwarning and will log the resulting string to a
2296    warnings logger named "py.warnings" with level logging.WARNING.
2297    """
2298    if file is not None:
2299        if _warnings_showwarning is not None:
2300            _warnings_showwarning(message, category, filename, lineno, file, line)
2301    else:
2302        s = warnings.formatwarning(message, category, filename, lineno, line)
2303        logger = getLogger("py.warnings")
2304        if not logger.handlers:
2305            logger.addHandler(NullHandler())
2306        # bpo-46557: Log str(s) as msg instead of logger.warning("%s", s)
2307        # since some log aggregation tools group logs by the msg arg
2308        logger.warning(str(s))
2309
2310def captureWarnings(capture):
2311    """
2312    If capture is true, redirect all warnings to the logging package.
2313    If capture is False, ensure that warnings are not redirected to logging
2314    but to their original destinations.
2315    """
2316    global _warnings_showwarning
2317    if capture:
2318        if _warnings_showwarning is None:
2319            _warnings_showwarning = warnings.showwarning
2320            warnings.showwarning = _showwarning
2321    else:
2322        if _warnings_showwarning is not None:
2323            warnings.showwarning = _warnings_showwarning
2324            _warnings_showwarning = None
2325
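# An illustrative sketch: route warnings.warn() output through logging.
#
#   import logging, warnings
#   logging.captureWarnings(True)
#   warnings.warn("deprecated setting")  # emitted via the "py.warnings" logger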