• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
2#
3# Permission to use, copy, modify, and distribute this software and its
4# documentation for any purpose and without fee is hereby granted,
5# provided that the above copyright notice appear in all copies and that
6# both that copyright notice and this permission notice appear in
7# supporting documentation, and that the name of Vinay Sajip
8# not be used in advertising or publicity pertaining to distribution
9# of the software without specific, written prior permission.
10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16
17"""
18Additional handlers for the logging package for Python. The core package is
19based on PEP 282 and comments thereto in comp.lang.python.
20
21Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.
22
23To use, simply 'import logging.handlers' and log away!
24"""
25
26import io, logging, socket, os, pickle, struct, time, re
27from stat import ST_DEV, ST_INO, ST_MTIME
28import queue
29import threading
30import copy
31
32#
33# Some constants...
34#
35
# Default ports used by the network logging handlers in this module.
DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
# Conventional syslog port (same number for UDP and TCP transports).
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
44
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly.  Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    # Optional user-installed callables that customise rotation behaviour.
    namer = None
    rotator = None

    def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
        """
        Use the specified filename for streamed logging.
        """
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        self.mode = mode
        self.encoding = encoding
        self.errors = errors

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, rolling over first when
        shouldRollover() indicates that this record warrants it.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        Delegates to the 'namer' attribute of the handler when it is
        callable, passing the default name to it.  When 'namer' is not
        callable (the default is None), the name is returned unchanged.

        :param default_name: The default name for the log file.
        """
        if callable(self.namer):
            return self.namer(default_name)
        return default_name

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        Delegates to the 'rotator' attribute of the handler when it is
        callable, passing the source and dest arguments to it.  When
        'rotator' is not callable (the default is None), the source is
        simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        if callable(self.rotator):
            self.rotator(source, dest)
        else:
            # Issue 18940: A file may not have been created if delay is True.
            if os.path.exists(source):
                os.rename(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # Rotation only makes sense in append mode: if 'w' were honoured,
        # each run of the application would truncate the log and lose the
        # output of previous runs.
        if maxBytes > 0:
            mode = 'a'
        if "b" not in mode:
            encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                     delay=delay, errors=errors)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift app.log.1 -> app.log.2, ..., dropping the oldest backup.
            for index in range(self.backupCount - 1, 0, -1):
                source = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                           index))
                target = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                           index + 1))
                if os.path.exists(source):
                    if os.path.exists(target):
                        os.remove(target)
                    os.rename(source, target)
            target = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(target):
                os.remove(target)
            self.rotate(self.baseFilename, target)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        # See bpo-45401: Never rollover anything other than regular files
        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
            return False
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            # Seek to EOF explicitly; append mode does not guarantee the
            # file position on Windows (non-POSIX-compliant behaviour).
            self.stream.seek(0, 2)
            return self.stream.tell() + len(msg) >= self.maxBytes
        return False
201
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, atTime=None,
                 errors=None):
        """
        Open the specified file in append mode and use it as the stream for
        logging, rolling over at the point described by 'when'/'interval'.

        :param filename:    Path of the log file (may be a path object; see
                            Issue #27493).
        :param when:        Type of interval: 'S', 'M', 'H', 'D', 'W0'-'W6'
                            or 'midnight' (case-insensitive).
        :param interval:    Number of 'when' units between rollovers; for
                            'midnight' and 'W0'-'W6' the rollover point is
                            computed from the time of day / day of week.
        :param backupCount: If > 0, keep at most this many rotated files.
        :param utc:         If true, compute rollover times in UTC instead
                            of local time.
        :param atTime:      A datetime.time giving the time of day at which
                            'midnight'/'W' rollovers happen (None = 00:00).
        :raises ValueError: If 'when' is not one of the supported values.
        """
        encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                     delay=delay, errors=errors)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # re.ASCII keeps \d/\w matching ASCII only, matching strftime output.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        # Anchor the first rollover on the existing file's mtime when there
        # is one, so restarts don't reset the rotation schedule.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        # See bpo-45401: Never rollover anything other than regular files
        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
            return False
        t = int(time.time())
        if t >= self.rolloverAt:
            return True
        return False

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().

        :return: A sorted list of full paths of rotated files that are older
                 than the backupCount most recent ones (empty if fewer than
                 backupCount candidates exist).
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        # See bpo-44753: Don't use the extension when computing the prefix.
        n, e = os.path.splitext(baseName)
        prefix = n + '.'
        plen = len(prefix)
        for fileName in fileNames:
            if self.namer is None:
                # Our files will always start with baseName
                if not fileName.startswith(baseName):
                    continue
            else:
                # Our files could be just about anything after custom naming, but
                # likely candidates are of the form
                # foo.log.DATETIME_SUFFIX or foo.DATETIME_SUFFIX.log
                # NOTE(review): this heuristic may not cover every custom
                # namer's output — verify against the namer actually in use.
                if (not fileName.startswith(baseName) and fileName.endswith(e) and
                    len(fileName) > (plen + 1) and not fileName[plen+1].isdigit()):
                    continue

            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # See bpo-45628: The date/time suffix could be anywhere in the
                # filename
                parts = suffix.split('.')
                for part in parts:
                    if self.extMatch.match(part):
                        result.append(os.path.join(dirName, fileName))
                        break
        if len(result) < self.backupCount:
            result = []
        else:
            # Lexicographic sort works because the suffixes are fixed-width
            # date stamps; keep only the entries beyond backupCount.
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # If DST changed between the interval start and now, shift the
            # stamp so the rotated file is named for local wall-clock time.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        # If processing stalled past one or more rollover points, skip ahead
        # so the next rollover is in the future.
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
450
451class WatchedFileHandler(logging.FileHandler):
452    """
453    A handler for logging to a file, which watches the file
454    to see if it has changed while in use. This can happen because of
455    usage of programs such as newsyslog and logrotate which perform
456    log file rotation. This handler, intended for use under Unix,
457    watches the file to see if it has changed since the last emit.
458    (A file has changed if its device or inode have changed.)
459    If it has changed, the old file stream is closed, and the file
460    opened to get a new stream.
461
462    This handler is not appropriate for use under Windows, because
463    under Windows open files cannot be moved or renamed - logging
464    opens the files with exclusive locks - and so there is no need
465    for such a handler. Furthermore, ST_INO is not supported under
466    Windows; stat always returns zero for this value.
467
468    This handler is based on a suggestion and patch by Chad J.
469    Schroeder.
470    """
471    def __init__(self, filename, mode='a', encoding=None, delay=False,
472                 errors=None):
473        if "b" not in mode:
474            encoding = io.text_encoding(encoding)
475        logging.FileHandler.__init__(self, filename, mode=mode,
476                                     encoding=encoding, delay=delay,
477                                     errors=errors)
478        self.dev, self.ino = -1, -1
479        self._statstream()
480
481    def _statstream(self):
482        if self.stream:
483            sres = os.fstat(self.stream.fileno())
484            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
485
486    def reopenIfNeeded(self):
487        """
488        Reopen log file if needed.
489
490        Checks if the underlying file has changed, and if it
491        has, close the old stream and reopen the file to get the
492        current stream.
493        """
494        # Reduce the chance of race conditions by stat'ing by path only
495        # once and then fstat'ing our new fd if we opened a new log stream.
496        # See issue #14632: Thanks to John Mulligan for the problem report
497        # and patch.
498        try:
499            # stat the file by path, checking for existence
500            sres = os.stat(self.baseFilename)
501        except FileNotFoundError:
502            sres = None
503        # compare file system stat with that of our stream file handle
504        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
505            if self.stream is not None:
506                # we have an open file handle, clean it up
507                self.stream.flush()
508                self.stream.close()
509                self.stream = None  # See Issue #21742: _open () might fail.
510                # open a new file handle and get new stat info from that fd
511                self.stream = self._open()
512                self._statstream()
513
514    def emit(self, record):
515        """
516        Emit a record.
517
518        If underlying file has changed, reopen the file before emitting the
519        record to it.
520        """
521        self.reopenIfNeeded()
522        logging.FileHandler.emit(self, record)
523
524
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # A None port means a Unix domain socket addressed by path.
        self.address = host if port is None else (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is None:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            try:
                sock.connect(self.address)
            except OSError:
                sock.close()  # Issue 19182
                raise
            return sock
        return socket.create_connection(self.address, timeout=timeout)

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None (first attempt after a disconnect) or
        # we've waited at least until the scheduled retry time.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None  # next time, no delay before trying
        except OSError:
            # Creation failed: schedule the next attempt with exponential
            # backoff, capped at retryMax.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = min(self.retryPeriod * self.retryFactor,
                                       self.retryMax)
            self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock can still be None, either because we haven't reached the
        # retry time yet, or because the reconnection attempt failed.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError:  # pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        if record.exc_info:
            # Format once, just to get traceback text into record.exc_text.
            self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        payload = pickle.dumps(d, 1)
        # 4-byte big-endian length prefix lets the receiver frame the stream.
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            self.send(self.makePickle(record))
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock, self.sock = self.sock, None
            if sock:
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
691
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        # A None port means a Unix domain datagram socket addressed by path.
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
733
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon
    LOG_NTP       = 12      #  NTP subsystem
    LOG_SECURITY  = 13      #  Log audit
    LOG_CONSOLE   = 14      #  Log alert
    LOG_SOLCRON   = 15      #  Scheduling daemon (Solaris)

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    # Maps syslog priority keywords to the numeric codes above; used by
    # encodePriority() when a string priority is passed in.
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    # Maps syslog facility keywords to the numeric codes above; used by
    # encodePriority() when a string facility is passed in.
    facility_names = {
        "auth":         LOG_AUTH,
        "authpriv":     LOG_AUTHPRIV,
        "console":      LOG_CONSOLE,
        "cron":         LOG_CRON,
        "daemon":       LOG_DAEMON,
        "ftp":          LOG_FTP,
        "kern":         LOG_KERN,
        "lpr":          LOG_LPR,
        "mail":         LOG_MAIL,
        "news":         LOG_NEWS,
        "ntp":          LOG_NTP,
        "security":     LOG_SECURITY,
        "solaris-cron": LOG_SOLCRON,
        "syslog":       LOG_SYSLOG,
        "user":         LOG_USER,
        "uucp":         LOG_UUCP,
        "local0":       LOG_LOCAL0,
        "local1":       LOG_LOCAL1,
        "local2":       LOG_LOCAL2,
        "local3":       LOG_LOCAL3,
        "local4":       LOG_LOCAL4,
        "local5":       LOG_LOCAL5,
        "local6":       LOG_LOCAL6,
        "local7":       LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            # Try each address returned by getaddrinfo until a socket can
            # be created (and, for SOCK_STREAM, connected); remember the
            # last error so it can be re-raised if every candidate fails.
            # Note that the loop rebinds the local ``socktype`` to the
            # concrete type of the address that succeeded.
            for res in ress:
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            self.socket = sock
            self.socktype = socktype

    def _connect_unixsocket(self, address):
        # Connect to a Unix domain socket at ``address``, honouring the
        # requested socktype; when the user left socktype as None, try
        # SOCK_DGRAM first and fall back to SOCK_STREAM.
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        # Facility occupies the high bits, priority the low 3 bits (see
        # the encoding comment at the top of the class).
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        # NOTE(review): the socket attribute is closed but not cleared;
        # the handler is not expected to be used after close().
        self.acquire()
        try:
            self.socket.close()
            logging.Handler.close(self)
        finally:
            self.release()

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg
            if self.unixsocket:
                # The local syslog daemon may have restarted since we
                # connected; on error, reconnect once and resend.
                try:
                    self.socket.send(msg)
                except OSError:
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
994
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        mailhost may be a host name or a (host, port) tuple; use the
        tuple form to target a non-standard SMTP port. credentials, if
        given, is a (username, password) tuple used to authenticate.
        secure, if not None, requests TLS via `starttls` and is only
        consulted when credentials are supplied; it must be a tuple -
        empty, (keyfile,), or (keyfile, certfile) - which is passed
        straight to `starttls`. timeout is the SMTP connection timeout
        in seconds (default 5).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        self.username = None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        self.fromaddr = fromaddr
        # Normalize a single recipient string to a one-element list.
        self.toaddrs = [toaddrs] if isinstance(toaddrs, str) else toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        Override this method to derive the subject line from the record.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.message import EmailMessage
            import email.utils

            port = self.mailport or smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    # starttls requires an EHLO before and after upgrading.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
1074
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log, registering
    the given application name in the registry. When no dllname is
    supplied, win32service.pyd (which ships basic message placeholders)
    is used; note this inflates the event log, since the whole message
    source is stored in it. For slimmer logs, pass the name of your own
    DLL containing the message definitions you want in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # win32service.pyd lives one directory above the package
                # containing win32evtlogutil.
                pkgdir = os.path.split(win32evtlogutil.__file__)[0]
                dllname = os.path.join(os.path.split(pkgdir)[0],
                                       r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record.

        If you use your own messages, you could pass an ID as the logger
        msg instead of a format string and look it up here. This base
        version returns 1, the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override to supply your own categories; this version returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        This consults the handler's typemap attribute, populated in
        __init__() with mappings for DEBUG, INFO, WARNING, ERROR and
        CRITICAL; unknown levels fall back to the default type. With
        custom levels, either override this method or install a suitable
        dictionary as the handler's typemap.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type, then
        write the formatted message to the NT event log. A no-op when
        the Win32 extensions could not be imported.
        """
        if self._welu:
            try:
                self._welu.ReportEvent(self.appname,
                                       self.getMessageID(record),
                                       self.getEventCategory(record),
                                       self.getEventType(record),
                                       [self.format(record)])
            except Exception:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        The application name could be removed from the registry here,
        but then the Event Log Viewer would no longer be able to resolve
        the DLL name for already-logged events, so it is left in place.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
1172
class HTTPHandler(logging.Handler):
    """
    A handler which posts each logging record to a web server, using
    either GET or POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST"). credentials, if given, is a (user, password)
        pair sent as HTTP Basic auth; secure=True uses HTTPS, optionally
        with the given SSL context.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Default mapping of the log record to the dict that is sent as
        the CGI data. Override in your subclass to customize.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def getConnection(self, host, secure):
        """
        Return an HTTP[S]Connection to *host*.

        Override when a custom connection is required, for example if
        there is a proxy.
        """
        import http.client
        if secure:
            return http.client.HTTPSConnection(host, context=self.context)
        return http.client.HTTPConnection(host)

    def emit(self, record):
        """
        Emit a record.

        Send the record to the web server as a percent-encoded dictionary
        """
        try:
            import urllib.parse
            host = self.host
            conn = self.getConnection(host, self.secure)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the query, honouring any existing '?' in the URL.
                sep = '&' if url.find('?') >= 0 else '?'
                url = url + "%c%s" % (sep, data)
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            # See issue #30904: the putrequest call above already adds the
            # Host header on Python 3.x, so it is not added again here.
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
                conn.putheader('Authorization', s)
            conn.endheaders()
            if self.method == "POST":
                conn.send(data.encode('utf-8'))
            conn.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)
1262
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. After each
    record is appended, shouldFlush() is consulted; when it returns true,
    flush() is expected to do whatever processing is needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true once the buffer has reached capacity. Override to
        implement a custom flushing strategy.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Buffer the record, then flush if shouldFlush() says the buffer
        needs processing.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This base version simply empties the buffer, under the handler
        lock.
        """
        self.acquire()
        try:
            self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        Flushes any buffered records, then chains to the parent close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
1319
class MemoryHandler(BufferingHandler):
    """
    A handler which buffers records in memory and periodically forwards
    them to a target handler. A flush happens when the buffer fills up
    or when a record at or above a given severity arrives.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize the handler with the buffer size, the level that
        triggers flushing, and an optional target handler.

        Without a target set here or later via setTarget(), buffered
        records go nowhere on flush.

        ``flushOnClose`` defaults to ``True`` for backward compatibility:
        historically, closing the handler flushed the buffer even when
        neither the flush level nor the capacity had been reached. Pass
        ``False`` to suppress that final flush.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why flushOnClose was introduced.
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Flush when a record at/above flushLevel arrives or the buffer is
        at capacity.
        """
        return (record.levelno >= self.flushLevel or
                len(self.buffer) >= self.capacity)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.acquire()
        try:
            self.target = target
        finally:
            self.release()

    def flush(self):
        """
        For a MemoryHandler, flushing means handing the buffered records
        to the target, if one is set, and clearing the buffer. Override
        for different behaviour.

        With no target set, the buffer is deliberately left intact.
        """
        self.acquire()
        try:
            if self.target:
                for buffered in self.buffer:
                    self.target.handle(buffered)
                self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Flush if so configured, then drop the target and close, clearing
        the buffer via the parent close().
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
1395
1396
class QueueHandler(logging.Handler):
    """
    A handler that forwards each event to a queue. Its typical use is
    with a multiprocessing Queue, centralising file logging in a single
    process of a multi-process application and thereby avoiding write
    contention between processes.

    New in Python 3.2, but the class can be copy-pasted into user code
    for earlier Python versions.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record.

        Uses put_nowait; override for blocking puts, timeouts or custom
        queue implementations.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepare a record for queuing; the returned object is what gets
        enqueued.

        The base implementation merges the message with its arguments
        and strips unpickleable items, working on a copy of the record.

        Override to e.g. convert the record to a dict or JSON string, or
        to send a differently modified copy.
        """
        # format() folds any traceback text into record.exc_text and
        # returns the fully-merged message, which then replaces msg/args
        # (those might not pickle). exc_info/exc_text are zapped too, as
        # they are no longer needed and typically unpickleable.
        msg = self.format(record)
        # bpo-35726: work on a copy so other handlers in the chain still
        # see the original record unmodified.
        record = copy.copy(record)
        record.message = msg
        record.msg = msg
        record.args = None
        record.exc_info = None
        record.exc_text = None
        return record

    def emit(self, record):
        """
        Emit a record.

        Prepares the record for pickling and writes it to the queue.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)
1464
1465
class QueueListener(object):
    """
    An internal threaded listener: watches a queue for LogRecords,
    removes them, and offers each one to a list of handlers.
    """
    # Placed on the queue by stop() to tell the worker thread to exit.
    _sentinel = None

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Initialise an instance with the specified queue and
        handlers.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Dequeue a record and return it, optionally blocking.

        Uses get; override for timeouts or custom queue implementations.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener.

        Spins up a daemon thread that monitors the queue for LogRecords
        to process.
        """
        worker = threading.Thread(target=self._monitor)
        worker.daemon = True
        self._thread = worker
        worker.start()

    def prepare(self, record):
        """
        Prepare a record for handling.

        Returns the record unchanged; override for custom marshalling or
        manipulation before the handlers see it.
        """
        return record

    def handle(self, record):
        """
        Handle a record.

        Offers the (prepared) record to every handler, optionally
        skipping handlers whose level exceeds the record's.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            if (not self.respect_handler_level
                    or record.levelno >= handler.level):
                handler.handle(record)

    def _monitor(self):
        """
        Monitor the queue for records, and ask the handler
        to deal with them.

        Runs on the internal thread; terminates upon seeing the sentinel
        object in the queue.
        """
        q = self.queue
        has_task_done = hasattr(q, 'task_done')
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    if has_task_done:
                        q.task_done()
                    break
                self.handle(record)
                if has_task_done:
                    q.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        Enqueue the sentinel record.

        Uses put_nowait; override for timeouts or custom queue
        implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener.

        Asks the worker thread to terminate via the sentinel, then waits
        for it. Records still on the queue when the application exits
        without calling this will not be processed.
        """
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None
1574