# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.

Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging.handlers' and log away!
"""

import copy
import io
import logging
import os
import pickle
import queue
import re
import socket
import struct
import threading
import time

#
# Some constants...
#

DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day

class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly.  Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    namer = None
    rotator = None

    def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
        """
        Use the specified filename for streamed logging
        """
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        self.mode = mode
        self.encoding = encoding
        self.errors = errors

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        This is provided so that a custom filename can be used.

        The default implementation calls the 'namer' attribute of the
        handler, if it's callable, passing the default name to
        it. If the attribute isn't callable (the default is None), the name
        is returned unchanged.

        :param default_name: The default name for the log file.
        """
        if not callable(self.namer):
            result = default_name
        else:
            result = self.namer(default_name)
        return result

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        The default implementation calls the 'rotator' attribute of the
        handler, if it's callable, passing the source and dest arguments to
        it. If the attribute isn't callable (the default is None), the source
        is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        if not callable(self.rotator):
            # Issue 18940: A file may not have been created if delay is True.
            if os.path.exists(source):
                os.rename(source, dest)
        else:
            self.rotator(source, dest)

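# Illustrative note (not part of the original module): the 'namer' and
# 'rotator' attributes above allow rotation behaviour to be customised
# without subclassing. A minimal sketch, assuming a handler instance named
# 'rfh', that gzip-compresses each rotated file might look like:
#
#   import gzip, os, shutil
#
#   def gzip_namer(default_name):
#       return default_name + ".gz"
#
#   def gzip_rotator(source, dest):
#       with open(source, "rb") as f_in, gzip.open(dest, "wb") as f_out:
#           shutil.copyfileobj(f_in, f_out)
#       os.remove(source)
#
#   rfh.namer = gzip_namer
#   rfh.rotator = gzip_rotator
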
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        if "b" not in mode:
            encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                     delay=delay, errors=errors)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            pos = self.stream.tell()
            if not pos:
                # gh-116263: Never rollover an empty file
                return False
            msg = "%s\n" % self.format(record)
            if pos + len(msg) >= self.maxBytes:
                # See bpo-45401: Never rollover anything other than regular files
                if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
                    return False
                return True
        return False

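# Illustrative usage sketch (not part of the original module): attaching a
# size-based rotating handler to a logger, keeping up to five 1 MiB backups.
# The logger name and file path are arbitrary examples:
#
#   import logging
#   import logging.handlers
#
#   logger = logging.getLogger("myapp")
#   handler = logging.handlers.RotatingFileHandler(
#       "app.log", maxBytes=1_048_576, backupCount=5)
#   handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
#   logger.addHandler(handler)
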
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, atTime=None,
                 errors=None):
        encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                     delay=delay, errors=errors)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(?!\d)"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(?!\d)"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}_\d{2}(?!\d)"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            extMatch = r"(?<!\d)\d{4}-\d{2}-\d{2}(?!\d)"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch is a pattern for matching a datetime suffix in a file name.
        # After custom naming, it is no longer guaranteed to be separated by
        # periods from other parts of the filename.  The lookbehind and
        # lookahead assertions (?<!\d) and (?!\d) ensure that the datetime
        # suffix (which itself starts and ends with digits) is not preceded
        # or followed by digits.
        # This reduces the number of false matches and improves performance.
        self.extMatch = re.compile(extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        if os.path.exists(filename):
            t = int(os.stat(filename).st_mtime)
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

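    # Illustrative usage sketch (not part of the original module): a handler
    # that rolls over every night at 03:30 and keeps a week of backups. The
    # file name and wiring are arbitrary examples:
    #
    #   import datetime
    #   import logging.handlers
    #
    #   handler = logging.handlers.TimedRotatingFileHandler(
    #       "app.log", when="midnight", backupCount=7,
    #       atTime=datetime.time(3, 30))
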
    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r <= 0:
                # Rotate time is before the current time (for example when
                # self.atTime is 13:45 and it is now 14:15), so rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    result += daysToWait * _MIDNIGHT
                result += self.interval - _MIDNIGHT * 7
            else:
                result += self.interval - _MIDNIGHT
            if not self.utc:
                dstNow = t[-1]
                dstAtRollover = time.localtime(result)[-1]
                if dstNow != dstAtRollover:
                    if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                        addend = -3600
                        if not time.localtime(result-3600)[-1]:
                            addend = 0
                    else:           # DST bows out before next rollover, so we need to add an hour
                        addend = 3600
                    result += addend
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            # See #89564: Never rollover anything other than regular files
            if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
                # The file is not a regular file, so do not rollover, but do
                # set the next rollover time to avoid repeated checks.
                self.rolloverAt = self.computeRollover(t)
                return False

            return True
        return False

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        if self.namer is None:
            prefix = baseName + '.'
            plen = len(prefix)
            for fileName in fileNames:
                if fileName[:plen] == prefix:
                    suffix = fileName[plen:]
                    if self.extMatch.fullmatch(suffix):
                        result.append(os.path.join(dirName, fileName))
        else:
            for fileName in fileNames:
                # Our files could be just about anything after custom naming,
                # but they should contain the datetime suffix.
                # Try to find the datetime suffix in the file name and verify
                # that the file name can be generated by this handler.
                m = self.extMatch.search(fileName)
                while m:
                    dfn = self.namer(self.baseFilename + "." + m[0])
                    if os.path.basename(dfn) == fileName:
                        result.append(os.path.join(dirName, fileName))
                        break
                    m = self.extMatch.search(fileName, m.start() + 1)

        if len(result) < self.backupCount:
            result = []
        else:
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstNow = time.localtime(currentTime)[-1]
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            # Already rolled over.
            return

        if self.stream:
            self.stream.close()
            self.stream = None
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        self.rolloverAt = self.computeRollover(currentTime)

class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False,
                 errors=None):
        if "b" not in mode:
            encoding = io.text_encoding(encoding)
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        if self.stream is None:
            return
        sres = os.fstat(self.stream.fileno())
        self.dev = sres.st_dev
        self.ino = sres.st_ino

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Checks if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        if self.stream is None:
            return

        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)

            # compare file system stat with that of our stream file handle
            reopen = (sres.st_dev != self.dev or sres.st_ino != self.ino)
        except FileNotFoundError:
            reopen = True

        if not reopen:
            return

        # we have an open file handle, clean it up
        self.stream.flush()
        self.stream.close()
        self.stream = None  # See Issue #21742: _open () might fail.

        # open a new file handle and get new stat info from that fd
        self.stream = self._open()
        self._statstream()

    def emit(self, record):
        """
        Emit a record.

        If underlying file has changed, reopen the file before emitting the
        record to it.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)

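# Illustrative usage sketch (not part of the original module): on Unix,
# WatchedFileHandler pairs naturally with an external rotator such as
# logrotate, which renames the file out from under the process. The path
# below is an arbitrary example:
#
#   import logging.handlers
#
#   handler = logging.handlers.WatchedFileHandler("/var/log/myapp/app.log")
#   logging.getLogger("myapp").addHandler(handler)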

class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        If the attribute *closeOnError* is set to True, then when a socket
        error occurs, the socket is silently closed and then reopened on the
        next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        if port is None:
            self.address = host
        else:
            self.address = (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is not None:
            result = socket.create_connection(self.address, timeout=timeout)
        else:
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        return result

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = True
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except OSError:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        s = pickle.dumps(d, 1)
        slen = struct.pack(">L", len(s))
        return slen + s

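    # Illustrative receiver sketch (not part of the original module): the wire
    # format produced by makePickle() is a 4-byte big-endian length followed
    # by a pickled dict, which logging.makeLogRecord() can turn back into a
    # LogRecord. Assuming 'conn' is an accepted stream socket on the receiving
    # end (error handling and short reads of the header are glossed over):
    #
    #   import pickle, struct, logging
    #
    #   def read_record(conn):
    #       header = conn.recv(4)
    #       slen = struct.unpack(">L", header)[0]
    #       data = conn.recv(slen)
    #       while len(data) < slen:
    #           data += conn.recv(slen - len(data))
    #       return logging.makeLogRecord(pickle.loads(data))
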
    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        with self.lock:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
            logging.Handler.close(self)

class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        if self.port is None:
            family = socket.AF_UNIX
        else:
            family = socket.AF_INET
        s = socket.socket(family, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)

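# Illustrative usage sketch (not part of the original module): sending records
# to a remote listener on the default ports defined near the top of this
# module. The host name is an arbitrary example, and the receiving side must
# implement the length-prefixed pickle protocol described in SocketHandler:
#
#   import logging.handlers
#
#   tcp_handler = logging.handlers.SocketHandler(
#       "logs.example.com", logging.handlers.DEFAULT_TCP_LOGGING_PORT)
#   udp_handler = logging.handlers.DatagramHandler(
#       "logs.example.com", logging.handlers.DEFAULT_UDP_LOGGING_PORT)
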
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon
    LOG_NTP       = 12      #  NTP subsystem
    LOG_SECURITY  = 13      #  Log audit
    LOG_CONSOLE   = 14      #  Log alert
    LOG_SOLCRON   = 15      #  Scheduling daemon (Solaris)

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":         LOG_AUTH,
        "authpriv":     LOG_AUTHPRIV,
        "console":      LOG_CONSOLE,
        "cron":         LOG_CRON,
        "daemon":       LOG_DAEMON,
        "ftp":          LOG_FTP,
        "kern":         LOG_KERN,
        "lpr":          LOG_LPR,
        "mail":         LOG_MAIL,
        "news":         LOG_NEWS,
        "ntp":          LOG_NTP,
        "security":     LOG_SECURITY,
        "solaris-cron": LOG_SOLCRON,
        "syslog":       LOG_SYSLOG,
        "user":         LOG_USER,
        "uucp":         LOG_UUCP,
        "local0":       LOG_LOCAL0,
        "local1":       LOG_LOCAL1,
        "local2":       LOG_LOCAL2,
        "local3":       LOG_LOCAL3,
        "local4":       LOG_LOCAL4,
        "local5":       LOG_LOCAL5,
        "local6":       LOG_LOCAL6,
        "local7":       LOG_LOCAL7,
        }

    # Originally added to work around GH-43683. Unnecessary since GH-50043 but kept
    # for backwards compatibility.
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype
        self.socket = None
        self.createSocket()

    def _connect_unixsocket(self, address):
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

    def createSocket(self):
        """
        Try to create a socket and, if it's not a datagram socket, connect it
        to the other end. This method is called during handler initialization,
        but it's not regarded as an error if the other end isn't listening yet
        --- the method will be called again when emitting an event,
        if there is no socket at that point.
        """
        address = self.address
        socktype = self.socktype

        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it's not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            for res in ress:
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            self.socket = sock
            self.socktype = socktype

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        return (facility << 3) | priority

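    # Worked example (illustrative, not part of the original module): with the
    # default facility LOG_USER (1) and priority "warning" (LOG_WARNING, 4),
    # encodePriority() returns (1 << 3) | 4 == 12, so emit() prepends the
    # syslog PRI field "<12>" to the formatted message.
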
    def close(self):
        """
        Closes the socket.
        """
        with self.lock:
            sock = self.socket
            if sock:
                self.socket = None
                sock.close()
            logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg

            if not self.socket:
                self.createSocket()

            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)

class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

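    # Illustrative usage sketch (not part of the original module): emailing
    # ERROR-and-above records via an authenticated, STARTTLS-capable relay.
    # Host names, addresses and credentials are arbitrary examples:
    #
    #   import logging.handlers
    #
    #   handler = logging.handlers.SMTPHandler(
    #       mailhost=("smtp.example.com", 587),
    #       fromaddr="app@example.com",
    #       toaddrs=["ops@example.com"],
    #       subject="Application error",
    #       credentials=("app", "secret"),
    #       secure=())
    #   handler.setLevel(logging.ERROR)
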
    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.message import EmailMessage
            import email.utils

            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)

class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            # Administrative privileges are required to add a source to the registry.
            # This may not be available for a user that just wants to add to an
            # existing source - handle this specific case.
            try:
                self._welu.AddSourceToRegistry(appname, dllname, logtype)
            except Exception as e:
                # This will probably be a pywintypes.error. Only raise if it's not
                # an "access denied" error, else let it pass
                if getattr(e, 'winerror', None) != 5:  # not access denied
                    raise
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except Exception:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)

class HTTPHandler(logging.Handler):
    """
    A class which sends records to a web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

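    # Illustrative usage sketch (not part of the original module): POSTing each
    # record to a collector endpoint over HTTPS. The host, path and credentials
    # are arbitrary examples:
    #
    #   import logging.handlers
    #
    #   handler = logging.handlers.HTTPHandler(
    #       "logs.example.com:443", "/ingest", method="POST",
    #       secure=True, credentials=("app", "secret"))
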
    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def getConnection(self, host, secure):
        """
        get a HTTP[S]Connection.

        Override when a custom connection is required, for example if
        there is a proxy.
        """
        import http.client
        if secure:
            connection = http.client.HTTPSConnection(host, context=self.context)
        else:
            connection = http.client.HTTPConnection(host)
        return connection

    def emit(self, record):
        """
        Emit a record.

        Send the record to the web server as a percent-encoded dictionary
        """
        try:
            import urllib.parse
            host = self.host
            h = self.getConnection(host, self.secure)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            # See issue #30904: putrequest call above already adds this header
            # on Python 3.x.
            # h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
                h.putheader('Authorization', s)
            h.endheaders()
            if self.method == "POST":
                h.send(data.encode('utf-8'))
            h.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)

class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever a
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
1307    """
1308    def __init__(self, capacity):
1309        """
1310        Initialize the handler with the buffer size.
1311        """
1312        logging.Handler.__init__(self)
1313        self.capacity = capacity
1314        self.buffer = []
1315
1316    def shouldFlush(self, record):
1317        """
1318        Should the handler flush its buffer?
1319
1320        Returns true if the buffer is up to capacity. This method can be
1321        overridden to implement custom flushing strategies.
1322        """
1323        return (len(self.buffer) >= self.capacity)
1324
1325    def emit(self, record):
1326        """
1327        Emit a record.
1328
1329        Append the record. If shouldFlush() tells us to, call flush() to process
1330        the buffer.
1331        """
1332        self.buffer.append(record)
1333        if self.shouldFlush(record):
1334            self.flush()
1335
1336    def flush(self):
1337        """
1338        Override to implement custom flushing behaviour.
1339
1340        This version just clears the buffer.
1341        """
1342        with self.lock:
1343            self.buffer.clear()
1344
1345    def close(self):
1346        """
1347        Close the handler.
1348
1349        This version just flushes and chains to the parent class' close().
1350        """
1351        try:
1352            self.flush()
1353        finally:
1354            logging.Handler.close(self)
1355
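# A minimal sketch of the intended specialisation points: override flush()
# to do something useful with the buffered records (here, writing them to a
# stream as one batch); shouldFlush() could be overridden similarly for a
# custom trigger. Illustrative only - not part of this module.
class _ExampleBatchStreamHandler(BufferingHandler):
    def __init__(self, capacity, stream):
        BufferingHandler.__init__(self, capacity)
        self.stream = stream

    def flush(self):
        # Write every buffered record in one go, then empty the buffer.
        with self.lock:
            for record in self.buffer:
                self.stream.write(self.format(record) + '\n')
            self.buffer.clear()
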
1356class MemoryHandler(BufferingHandler):
1357    """
1358    A handler class which buffers logging records in memory, periodically
1359    flushing them to a target handler. Flushing occurs whenever the buffer
1360    is full, or when an event of a certain severity or greater is seen.
1361    """
1362    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
1363                 flushOnClose=True):
1364        """
1365        Initialize the handler with the buffer size, the level at which
1366        flushing should occur and an optional target.
1367
1368        Note that without a target being set either here or via setTarget(),
1369        a MemoryHandler is no use to anyone!
1370
1371        The ``flushOnClose`` argument is ``True`` for backward compatibility
1372        reasons - the old behaviour is that when the handler is closed, the
1373        buffer is flushed, even if neither the flush level nor the capacity
1374        has been reached. To prevent this, set ``flushOnClose`` to ``False``.
1375        """
1376        BufferingHandler.__init__(self, capacity)
1377        self.flushLevel = flushLevel
1378        self.target = target
1379        # See Issue #26559 for why this has been added
1380        self.flushOnClose = flushOnClose
1381
1382    def shouldFlush(self, record):
1383        """
1384        Check for buffer full or a record at the flushLevel or higher.
1385        """
1386        return (len(self.buffer) >= self.capacity) or \
1387                (record.levelno >= self.flushLevel)
1388
1389    def setTarget(self, target):
1390        """
1391        Set the target handler for this handler.
1392        """
1393        with self.lock:
1394            self.target = target
1395
1396    def flush(self):
1397        """
1398        For a MemoryHandler, flushing means just sending the buffered
1399        records to the target, if there is one. Override if you want
1400        different behaviour.
1401
1402        The record buffer is only cleared if a target has been set.
1403        """
1404        with self.lock:
1405            if self.target:
1406                for record in self.buffer:
1407                    self.target.handle(record)
1408                self.buffer.clear()
1409
1410    def close(self):
1411        """
1412        Flush, if appropriately configured, then set the target to None and
1413        discard the buffer.
1414        """
1415        try:
1416            if self.flushOnClose:
1417                self.flush()
1418        finally:
1419            with self.lock:
1420                self.target = None
1421                BufferingHandler.close(self)
1422
1423
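# A small usage sketch: buffer cheap DEBUG records in memory and only write
# them to the target FileHandler once an ERROR is seen (or the buffer fills).
# The logger name and log file path are illustrative placeholders.
def _example_memory_handler_setup():
    target = logging.FileHandler('app.log')
    handler = MemoryHandler(capacity=200, flushLevel=logging.ERROR,
                            target=target)
    logger = logging.getLogger('example')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger
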
1424class QueueHandler(logging.Handler):
1425    """
1426    This handler sends events to a queue. Typically, it would be used together
1427    with a multiprocessing Queue to centralise logging to file in one process
1428    (in a multi-process application), so as to avoid file write contention
1429    between processes.
1430
1431    This code is new in Python 3.2, but this class can be copy-pasted into
1432    user code for use with earlier Python versions.
1433    """
1434
1435    def __init__(self, queue):
1436        """
1437        Initialise an instance, using the passed queue.
1438        """
1439        logging.Handler.__init__(self)
1440        self.queue = queue
1441        self.listener = None  # will be set to listener if configured via dictConfig()
1442
1443    def enqueue(self, record):
1444        """
1445        Enqueue a record.
1446
1447        The base implementation uses put_nowait. You may want to override
1448        this method if you want to use blocking, timeouts or custom queue
1449        implementations.
1450        """
1451        self.queue.put_nowait(record)
1452
1453    def prepare(self, record):
1454        """
1455        Prepare a record for queuing. The object returned by this method is
1456        enqueued.
1457
1458        The base implementation formats the record to merge the message and
1459        arguments, and removes unpickleable items from a copy of the record.
1460        Specifically, it overwrites the copy's `msg` and
1461        `message` attributes with the merged message (obtained by
1462        calling the handler's `format` method), and sets the `args`,
1463        `exc_info`, `exc_text` and `stack_info` attributes to None.
1464
1465        You might want to override this method if you want to convert
1466        the record to a dict or JSON string, or send a modified copy
1467        of the record while leaving the original intact.
1468        """
1469        # The format operation gets traceback text into record.exc_text
1470        # (if there's exception data), and also returns the formatted
1471        # message. We can then use this to replace the original
1472        # msg + args, as these might be unpickleable. We also zap the
1473        # exc_info, exc_text and stack_info attributes, as they are no longer
1474        # needed and, if not None, will typically not be pickleable.
1475        msg = self.format(record)
1476        # bpo-35726: make copy of record to avoid affecting other handlers in the chain.
1477        record = copy.copy(record)
1478        record.message = msg
1479        record.msg = msg
1480        record.args = None
1481        record.exc_info = None
1482        record.exc_text = None
1483        record.stack_info = None
1484        return record
1485
1486    def emit(self, record):
1487        """
1488        Emit a record.
1489
1490        Writes the LogRecord to the queue, preparing it for pickling first.
1491        """
1492        try:
1493            self.enqueue(self.prepare(record))
1494        except Exception:
1495            self.handleError(record)
1496
1497
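# A sketch of the prepare() override described in the docstring above:
# enqueue a plain dict (e.g. for a JSON-based transport) instead of a
# LogRecord. The choice of attributes is illustrative, and this class is
# not part of the module.
class _ExampleDictQueueHandler(QueueHandler):
    def prepare(self, record):
        # The formatted message already merges msg/args and traceback text.
        return {
            'name': record.name,
            'level': record.levelname,
            'message': self.format(record),
        }
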
1498class QueueListener(object):
1499    """
1500    This class implements an internal threaded listener which watches for
1501    LogRecords being added to a queue, removes them and passes them to a
1502    list of handlers for processing.
1503    """
1504    _sentinel = None
1505
1506    def __init__(self, queue, *handlers, respect_handler_level=False):
1507        """
1508        Initialise an instance with the specified queue and
1509        handlers.
1510        """
1511        self.queue = queue
1512        self.handlers = handlers
1513        self._thread = None
1514        self.respect_handler_level = respect_handler_level
1515
1516    def dequeue(self, block):
1517        """
1518        Dequeue a record and return it, optionally blocking.
1519
1520        The base implementation uses get. You may want to override this method
1521        if you want to use timeouts or work with custom queue implementations.
1522        """
1523        return self.queue.get(block)
1524
1525    def start(self):
1526        """
1527        Start the listener.
1528
1529        This starts up a background thread to monitor the queue for
1530        LogRecords to process.
1531        """
1532        self._thread = t = threading.Thread(target=self._monitor)
1533        t.daemon = True
1534        t.start()
1535
1536    def prepare(self, record):
1537        """
1538        Prepare a record for handling.
1539
1540        This method just returns the passed-in record. You may want to
1541        override this method if you need to do any custom marshalling or
1542        manipulation of the record before passing it to the handlers.
1543        """
1544        return record
1545
1546    def handle(self, record):
1547        """
1548        Handle a record.
1549
1550        This just loops through the handlers offering them the record
1551        to handle.
1552        """
1553        record = self.prepare(record)
1554        for handler in self.handlers:
1555            if not self.respect_handler_level:
1556                process = True
1557            else:
1558                process = record.levelno >= handler.level
1559            if process:
1560                handler.handle(record)
1561
1562    def _monitor(self):
1563        """
1564        Monitor the queue for records, and ask the handler
1565        to deal with them.
1566
1567        This method runs on a separate, internal thread.
1568        The thread will terminate if it sees a sentinel object in the queue.
1569        """
1570        q = self.queue
1571        has_task_done = hasattr(q, 'task_done')
1572        while True:
1573            try:
1574                record = self.dequeue(True)
1575                if record is self._sentinel:
1576                    if has_task_done:
1577                        q.task_done()
1578                    break
1579                self.handle(record)
1580                if has_task_done:
1581                    q.task_done()
1582            except queue.Empty:
1583                break
1584
1585    def enqueue_sentinel(self):
1586        """
1587        This is used to enqueue the sentinel record.
1588
1589        The base implementation uses put_nowait. You may want to override this
1590        method if you want to use timeouts or work with custom queue
1591        implementations.
1592        """
1593        self.queue.put_nowait(self._sentinel)
1594
1595    def stop(self):
1596        """
1597        Stop the listener.
1598
1599        This asks the thread to terminate, and then waits for it to do so.
1600        Note that if you don't call this before your application exits, there
1601        may be some records still left on the queue, which won't be processed.
1602        """
1603        if self._thread:  # see gh-114706 - allow calling this more than once
1604            self.enqueue_sentinel()
1605            self._thread.join()
1606            self._thread = None
1607
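# A short end-to-end sketch of the usual QueueHandler / QueueListener pairing:
# the handlers attached to the listener run on its internal thread, so the
# logging call itself only pays for a queue put. The logger name and the
# StreamHandler target are illustrative.
def _example_queue_logging():
    q = queue.Queue(-1)                       # unbounded in-process queue
    listener = QueueListener(q, logging.StreamHandler(),
                             respect_handler_level=True)
    logger = logging.getLogger('example.queued')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(QueueHandler(q))
    listener.start()
    try:
        logger.info('handled on the listener thread')
    finally:
        listener.stop()                       # flush remaining records, join thread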