1# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
2#
3# Permission to use, copy, modify, and distribute this software and its
4# documentation for any purpose and without fee is hereby granted,
5# provided that the above copyright notice appear in all copies and that
6# both that copyright notice and this permission notice appear in
7# supporting documentation, and that the name of Vinay Sajip
8# not be used in advertising or publicity pertaining to distribution
9# of the software without specific, written prior permission.
10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16
17"""
18Additional handlers for the logging package for Python. The core package is
19based on PEP 282 and comments thereto in comp.lang.python.
20
21Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
22
23To use, simply 'import logging.handlers' and log away!
24"""
25
26import logging, socket, os, pickle, struct, time, re
27from stat import ST_DEV, ST_INO, ST_MTIME
28import queue
29try:
30    import threading
31except ImportError: #pragma: no cover
32    threading = None
33
34#
35# Some constants...
36#
37
38DEFAULT_TCP_LOGGING_PORT    = 9020
39DEFAULT_UDP_LOGGING_PORT    = 9021
40DEFAULT_HTTP_LOGGING_PORT   = 9022
41DEFAULT_SOAP_LOGGING_PORT   = 9023
42SYSLOG_UDP_PORT             = 514
43SYSLOG_TCP_PORT             = 514
44
45_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
46
47class BaseRotatingHandler(logging.FileHandler):
48    """
49    Base class for handlers that rotate log files at a certain point.
50    Not meant to be instantiated directly.  Instead, use RotatingFileHandler
51    or TimedRotatingFileHandler.
52    """
53    def __init__(self, filename, mode, encoding=None, delay=False):
54        """
55        Use the specified filename for streamed logging
56        """
57        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
58        self.mode = mode
59        self.encoding = encoding
60        self.namer = None
61        self.rotator = None
62
63    def emit(self, record):
64        """
65        Emit a record.
66
67        Output the record to the file, catering for rollover as described
68        in doRollover().
69        """
70        try:
71            if self.shouldRollover(record):
72                self.doRollover()
73            logging.FileHandler.emit(self, record)
74        except Exception:
75            self.handleError(record)
76
77    def rotation_filename(self, default_name):
78        """
79        Modify the filename of a log file when rotating.
80
81        This is provided so that a custom filename can be used.
82
83        The default implementation calls the 'namer' attribute of the
84        handler, if it's callable, passing the default name to
85        it. If the attribute isn't callable (the default is None), the name
86        is returned unchanged.
87
88        :param default_name: The default name for the log file.
89        """
90        if not callable(self.namer):
91            result = default_name
92        else:
93            result = self.namer(default_name)
94        return result
95
96    def rotate(self, source, dest):
97        """
98        When rotating, rotate the current log.
99
100        The default implementation calls the 'rotator' attribute of the
101        handler, if it's callable, passing the source and dest arguments to
102        it. If the attribute isn't callable (the default is None), the source
103        is simply renamed to the destination.
104
105        :param source: The source filename. This is normally the base
106                       filename, e.g. 'test.log'
107        :param dest:   The destination filename. This is normally
108                       what the source is rotated to, e.g. 'test.log.1'.
109        """
110        if not callable(self.rotator):
111            # Issue 18940: A file may not have been created if delay is True.
112            if os.path.exists(source):
113                os.rename(source, dest)
114        else:
115            self.rotator(source, dest)
116
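# Hedged sketch of the 'namer'/'rotator' hooks described above: compress each
# rotated file with gzip. The names 'gzip_namer' and 'gzip_rotator' are
# illustrative, not part of this module.
#
#   import gzip, os, shutil
#
#   def gzip_namer(default_name):
#       return default_name + '.gz'
#
#   def gzip_rotator(source, dest):
#       with open(source, 'rb') as sf, gzip.open(dest, 'wb') as df:
#           shutil.copyfileobj(sf, df)
#       os.remove(source)
#
#   handler = RotatingFileHandler('app.log', maxBytes=1024, backupCount=3)
#   handler.namer = gzip_namer
#   handler.rotator = gzip_rotator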
117class RotatingFileHandler(BaseRotatingHandler):
118    """
119    Handler for logging to a set of files, which switches from one file
120    to the next when the current file reaches a certain size.
121    """
122    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
123        """
124        Open the specified file and use it as the stream for logging.
125
126        By default, the file grows indefinitely. You can specify particular
127        values of maxBytes and backupCount to allow the file to rollover at
128        a predetermined size.
129
130        Rollover occurs whenever the current log file is nearly maxBytes in
131        length. If backupCount is >= 1, the system will successively create
132        new files with the same pathname as the base file, but with extensions
133        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
134        and a base file name of "app.log", you would get "app.log",
135        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
136        written to is always "app.log" - when it gets filled up, it is closed
137        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
138        exist, then they are renamed to "app.log.2", "app.log.3" etc.
139        respectively.
140
141        If maxBytes is zero, rollover never occurs.
142        """
143        # If rotation/rollover is wanted, it doesn't make sense to use another
144        # mode. If for example 'w' were specified, then if there were multiple
145        # runs of the calling application, the logs from previous runs would be
146        # lost if the 'w' is respected, because the log file would be truncated
147        # on each run.
148        if maxBytes > 0:
149            mode = 'a'
150        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
151        self.maxBytes = maxBytes
152        self.backupCount = backupCount
153
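    # Hedged usage sketch for the size-based rollover described above; the file
    # name and limits are assumptions, not defaults:
    #
    #   handler = RotatingFileHandler('app.log', maxBytes=1024 * 1024, backupCount=5)
    #   logging.getLogger('myapp').addHandler(handler)
    #   # Once app.log reaches ~1 MiB it is renamed to app.log.1, app.log.1 to
    #   # app.log.2, and so on, keeping at most five backups.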
154    def doRollover(self):
155        """
156        Do a rollover, as described in __init__().
157        """
158        if self.stream:
159            self.stream.close()
160            self.stream = None
161        if self.backupCount > 0:
162            for i in range(self.backupCount - 1, 0, -1):
163                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
164                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
165                                                        i + 1))
166                if os.path.exists(sfn):
167                    if os.path.exists(dfn):
168                        os.remove(dfn)
169                    os.rename(sfn, dfn)
170            dfn = self.rotation_filename(self.baseFilename + ".1")
171            if os.path.exists(dfn):
172                os.remove(dfn)
173            self.rotate(self.baseFilename, dfn)
174        if not self.delay:
175            self.stream = self._open()
176
177    def shouldRollover(self, record):
178        """
179        Determine if rollover should occur.
180
181        Basically, see if the supplied record would cause the file to exceed
182        the size limit we have.
183        """
184        if self.stream is None:                 # delay was set...
185            self.stream = self._open()
186        if self.maxBytes > 0:                   # are we rolling over?
187            msg = "%s\n" % self.format(record)
188            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
189            if self.stream.tell() + len(msg) >= self.maxBytes:
190                return 1
191        return 0
192
193class TimedRotatingFileHandler(BaseRotatingHandler):
194    """
195    Handler for logging to a file, rotating the log file at certain timed
196    intervals.
197
198    If backupCount is > 0, when rollover is done, no more than backupCount
199    files are kept - the oldest ones are deleted.
200    """
201    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
202        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
203        self.when = when.upper()
204        self.backupCount = backupCount
205        self.utc = utc
206        self.atTime = atTime
207        # Calculate the real rollover interval, which is just the number of
208        # seconds between rollovers.  Also set the filename suffix used when
209        # a rollover occurs.  Current 'when' events supported:
210        # S - Seconds
211        # M - Minutes
212        # H - Hours
213        # D - Days
214        # midnight - roll over at midnight
215        # W{0-6} - roll over on a certain day; 0 - Monday
216        #
217        # Case of the 'when' specifier is not important; lower or upper case
218        # will work.
219        if self.when == 'S':
220            self.interval = 1 # one second
221            self.suffix = "%Y-%m-%d_%H-%M-%S"
222            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
223        elif self.when == 'M':
224            self.interval = 60 # one minute
225            self.suffix = "%Y-%m-%d_%H-%M"
226            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
227        elif self.when == 'H':
228            self.interval = 60 * 60 # one hour
229            self.suffix = "%Y-%m-%d_%H"
230            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
231        elif self.when == 'D' or self.when == 'MIDNIGHT':
232            self.interval = 60 * 60 * 24 # one day
233            self.suffix = "%Y-%m-%d"
234            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
235        elif self.when.startswith('W'):
236            self.interval = 60 * 60 * 24 * 7 # one week
237            if len(self.when) != 2:
238                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
239            if self.when[1] < '0' or self.when[1] > '6':
240                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
241            self.dayOfWeek = int(self.when[1])
242            self.suffix = "%Y-%m-%d"
243            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
244        else:
245            raise ValueError("Invalid rollover interval specified: %s" % self.when)
246
247        self.extMatch = re.compile(self.extMatch, re.ASCII)
248        self.interval = self.interval * interval # multiply by units requested
249        # The following line added because the filename passed in could be a
250        # path object (see Issue #27493), but self.baseFilename will be a string
251        filename = self.baseFilename
252        if os.path.exists(filename):
253            t = os.stat(filename)[ST_MTIME]
254        else:
255            t = int(time.time())
256        self.rolloverAt = self.computeRollover(t)
257
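    # Hedged usage sketch for the timed rollover above; the file name and
    # schedule are assumptions:
    #
    #   import datetime
    #   handler = TimedRotatingFileHandler('app.log', when='midnight', backupCount=7,
    #                                      atTime=datetime.time(3, 0))
    #   # Rolls over daily at 03:00 local time; rotated files are suffixed with
    #   # the date, e.g. app.log.2016-01-31, and at most seven are kept.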
258    def computeRollover(self, currentTime):
259        """
260        Work out the rollover time based on the specified time.
261        """
262        result = currentTime + self.interval
263        # If we are rolling over at midnight or weekly, then the interval is already known.
264        # What we need to figure out is WHEN the next interval is.  In other words,
265        # if you are rolling over at midnight, then your base interval is 1 day,
266        # but you want to start that one day clock at midnight, not now.  So, we
267        # have to fudge the rolloverAt value in order to trigger the first rollover
268        # at the right time.  After that, the regular interval will take care of
269        # the rest.  Note that this code doesn't care about leap seconds. :)
270        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
271            # This could be done with less code, but I wanted it to be clear
272            if self.utc:
273                t = time.gmtime(currentTime)
274            else:
275                t = time.localtime(currentTime)
276            currentHour = t[3]
277            currentMinute = t[4]
278            currentSecond = t[5]
279            currentDay = t[6]
280            # r is the number of seconds left between now and the next rotation
281            if self.atTime is None:
282                rotate_ts = _MIDNIGHT
283            else:
284                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
285                    self.atTime.second)
286
287            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
288                currentSecond)
289            if r < 0:
290                # Rotate time is before the current time (for example when
291                # self.atTime is 13:45 and it is now 14:15), rotation is
292                # tomorrow.
293                r += _MIDNIGHT
294                currentDay = (currentDay + 1) % 7
295            result = currentTime + r
296            # If we are rolling over on a certain day, add in the number of days until
297            # the next rollover, but offset by 1 since we just calculated the time
298            # until the next day starts.  There are three cases:
299            # Case 1) The day to rollover is today; in this case, do nothing
300            # Case 2) The day to rollover is further in the interval (i.e., today is
301            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
302            #         next rollover is simply 6 - 2 - 1, or 3.
303            # Case 3) The day to rollover is behind us in the interval (i.e., today
304            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
305            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
306            #         number of days left in the current week (1) plus the number
307            #         of days in the next week until the rollover day (3).
308            # The calculations described in 2) and 3) above need to have a day added.
309            # This is because the above time calculation takes us to midnight on this
310            # day, i.e. the start of the next day.
311            if self.when.startswith('W'):
312                day = currentDay # 0 is Monday
313                if day != self.dayOfWeek:
314                    if day < self.dayOfWeek:
315                        daysToWait = self.dayOfWeek - day
316                    else:
317                        daysToWait = 6 - day + self.dayOfWeek + 1
318                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
319                    if not self.utc:
320                        dstNow = t[-1]
321                        dstAtRollover = time.localtime(newRolloverAt)[-1]
322                        if dstNow != dstAtRollover:
323                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
324                                addend = -3600
325                            else:           # DST bows out before next rollover, so we need to add an hour
326                                addend = 3600
327                            newRolloverAt += addend
328                    result = newRolloverAt
329        return result
330
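    # Worked example of the calculation above (assuming when='MIDNIGHT',
    # atTime=None and a local time of 14:15:00): rotate_ts is _MIDNIGHT (86400),
    # the seconds elapsed today are (14*60 + 15)*60 = 51300, so
    # r = 86400 - 51300 = 35100 and the first rollover lands at the next
    # local midnight.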
331    def shouldRollover(self, record):
332        """
333        Determine if rollover should occur.
334
335        record is not used, as we are just comparing times, but it is needed so
336        the method signatures are the same
337        """
338        t = int(time.time())
339        if t >= self.rolloverAt:
340            return 1
341        return 0
342
343    def getFilesToDelete(self):
344        """
345        Determine the files to delete when rolling over.
346
347        More specific than the earlier implementation, which just used glob.glob().
348        """
349        dirName, baseName = os.path.split(self.baseFilename)
350        fileNames = os.listdir(dirName)
351        result = []
352        prefix = baseName + "."
353        plen = len(prefix)
354        for fileName in fileNames:
355            if fileName[:plen] == prefix:
356                suffix = fileName[plen:]
357                if self.extMatch.match(suffix):
358                    result.append(os.path.join(dirName, fileName))
359        result.sort()
360        if len(result) < self.backupCount:
361            result = []
362        else:
363            result = result[:len(result) - self.backupCount]
364        return result
365
366    def doRollover(self):
367        """
368        Do a rollover; in this case, a date/time stamp is appended to the filename
369        when the rollover happens.  However, you want the file to be named for the
370        start of the interval, not the current time.  If there is a backup count,
371        then we have to get a list of matching filenames, sort them and remove
372        the ones with the oldest suffixes.
373        """
374        if self.stream:
375            self.stream.close()
376            self.stream = None
377        # get the time that this sequence started at and make it a TimeTuple
378        currentTime = int(time.time())
379        dstNow = time.localtime(currentTime)[-1]
380        t = self.rolloverAt - self.interval
381        if self.utc:
382            timeTuple = time.gmtime(t)
383        else:
384            timeTuple = time.localtime(t)
385            dstThen = timeTuple[-1]
386            if dstNow != dstThen:
387                if dstNow:
388                    addend = 3600
389                else:
390                    addend = -3600
391                timeTuple = time.localtime(t + addend)
392        dfn = self.rotation_filename(self.baseFilename + "." +
393                                     time.strftime(self.suffix, timeTuple))
394        if os.path.exists(dfn):
395            os.remove(dfn)
396        self.rotate(self.baseFilename, dfn)
397        if self.backupCount > 0:
398            for s in self.getFilesToDelete():
399                os.remove(s)
400        if not self.delay:
401            self.stream = self._open()
402        newRolloverAt = self.computeRollover(currentTime)
403        while newRolloverAt <= currentTime:
404            newRolloverAt = newRolloverAt + self.interval
405        #If DST changes and midnight or weekly rollover, adjust for this.
406        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
407            dstAtRollover = time.localtime(newRolloverAt)[-1]
408            if dstNow != dstAtRollover:
409                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
410                    addend = -3600
411                else:           # DST bows out before next rollover, so we need to add an hour
412                    addend = 3600
413                newRolloverAt += addend
414        self.rolloverAt = newRolloverAt
415
416class WatchedFileHandler(logging.FileHandler):
417    """
418    A handler for logging to a file, which watches the file
419    to see if it has changed while in use. This can happen because of
420    usage of programs such as newsyslog and logrotate which perform
421    log file rotation. This handler, intended for use under Unix,
422    watches the file to see if it has changed since the last emit.
423    (A file has changed if its device or inode have changed.)
424    If it has changed, the old file stream is closed, and the file
425    opened to get a new stream.
426
427    This handler is not appropriate for use under Windows, because
428    under Windows open files cannot be moved or renamed - logging
429    opens the files with exclusive locks - and so there is no need
430    for such a handler. Furthermore, ST_INO is not supported under
431    Windows; stat always returns zero for this value.
432
433    This handler is based on a suggestion and patch by Chad J.
434    Schroeder.
435    """
436    def __init__(self, filename, mode='a', encoding=None, delay=False):
437        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
438        self.dev, self.ino = -1, -1
439        self._statstream()
440
441    def _statstream(self):
442        if self.stream:
443            sres = os.fstat(self.stream.fileno())
444            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
445
446    def reopenIfNeeded(self):
447        """
448        Reopen log file if needed.
449
450        Checks if the underlying file has changed, and if it
451        has, close the old stream and reopen the file to get the
452        current stream.
453        """
454        # Reduce the chance of race conditions by stat'ing by path only
455        # once and then fstat'ing our new fd if we opened a new log stream.
456        # See issue #14632: Thanks to John Mulligan for the problem report
457        # and patch.
458        try:
459            # stat the file by path, checking for existence
460            sres = os.stat(self.baseFilename)
461        except FileNotFoundError:
462            sres = None
463        # compare file system stat with that of our stream file handle
464        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
465            if self.stream is not None:
466                # we have an open file handle, clean it up
467                self.stream.flush()
468                self.stream.close()
469                self.stream = None  # See Issue #21742: _open () might fail.
470                # open a new file handle and get new stat info from that fd
471                self.stream = self._open()
472                self._statstream()
473
474    def emit(self, record):
475        """
476        Emit a record.
477
478        If underlying file has changed, reopen the file before emitting the
479        record to it.
480        """
481        self.reopenIfNeeded()
482        logging.FileHandler.emit(self, record)
483
484
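# Hedged usage sketch for WatchedFileHandler alongside an external rotator
# such as logrotate; the path is an assumption:
#
#   handler = WatchedFileHandler('/var/log/myapp/app.log')
#   logging.getLogger('myapp').addHandler(handler)
#   # When logrotate renames the file, the next emit() notices the changed
#   # device/inode and reopens the path, so logging continues in the new file.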
485class SocketHandler(logging.Handler):
486    """
487    A handler class which writes logging records, in pickle format, to
488    a streaming socket. The socket is kept open across logging calls.
489    If the peer resets it, an attempt is made to reconnect on the next call.
490    The pickle which is sent is that of the LogRecord's attribute dictionary
491    (__dict__), so that the receiver does not need to have the logging module
492    installed in order to process the logging event.
493
494    To unpickle the record at the receiving end into a LogRecord, use the
495    makeLogRecord function.
496    """
497
498    def __init__(self, host, port):
499        """
500        Initializes the handler with a specific host address and port.
501
502        If the attribute *closeOnError* is set to True, the socket is silently
503        closed when a socket error occurs, and then reopened on the next
504        logging call.
505        """
506        logging.Handler.__init__(self)
507        self.host = host
508        self.port = port
509        if port is None:
510            self.address = host
511        else:
512            self.address = (host, port)
513        self.sock = None
514        self.closeOnError = False
515        self.retryTime = None
516        #
517        # Exponential backoff parameters.
518        #
519        self.retryStart = 1.0
520        self.retryMax = 30.0
521        self.retryFactor = 2.0
522
523    def makeSocket(self, timeout=1):
524        """
525        A factory method which allows subclasses to define the precise
526        type of socket they want.
527        """
528        if self.port is not None:
529            result = socket.create_connection(self.address, timeout=timeout)
530        else:
531            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
532            result.settimeout(timeout)
533            try:
534                result.connect(self.address)
535            except OSError:
536                result.close()  # Issue 19182
537                raise
538        return result
539
540    def createSocket(self):
541        """
542        Try to create a socket, using an exponential backoff with
543        a max retry time. Thanks to Robert Olson for the original patch
544        (SF #815911) which has been slightly refactored.
545        """
546        now = time.time()
547        # Either retryTime is None, in which case this
548        # is the first time back after a disconnect, or
549        # we've waited long enough.
550        if self.retryTime is None:
551            attempt = True
552        else:
553            attempt = (now >= self.retryTime)
554        if attempt:
555            try:
556                self.sock = self.makeSocket()
557                self.retryTime = None # next time, no delay before trying
558            except OSError:
559                #Creation failed, so set the retry time and return.
560                if self.retryTime is None:
561                    self.retryPeriod = self.retryStart
562                else:
563                    self.retryPeriod = self.retryPeriod * self.retryFactor
564                    if self.retryPeriod > self.retryMax:
565                        self.retryPeriod = self.retryMax
566                self.retryTime = now + self.retryPeriod
567
568    def send(self, s):
569        """
570        Send a pickled string to the socket.
571
572        This function allows for partial sends which can happen when the
573        network is busy.
574        """
575        if self.sock is None:
576            self.createSocket()
577        #self.sock can be None either because we haven't reached the retry
578        #time yet, or because we have reached the retry time and retried,
579        #but are still unable to connect.
580        if self.sock:
581            try:
582                self.sock.sendall(s)
583            except OSError: #pragma: no cover
584                self.sock.close()
585                self.sock = None  # so we can call createSocket next time
586
587    def makePickle(self, record):
588        """
589        Pickles the record in binary format with a length prefix, and
590        returns it ready for transmission across the socket.
591        """
592        ei = record.exc_info
593        if ei:
594            # just to get traceback text into record.exc_text ...
595            dummy = self.format(record)
596        # See issue #14436: If msg or args are objects, they may not be
597        # available on the receiving end. So we convert the msg % args
598        # to a string, save it as msg and zap the args.
599        d = dict(record.__dict__)
600        d['msg'] = record.getMessage()
601        d['args'] = None
602        d['exc_info'] = None
603        # Issue #25685: delete 'message' if present: redundant with 'msg'
604        d.pop('message', None)
605        s = pickle.dumps(d, 1)
606        slen = struct.pack(">L", len(s))
607        return slen + s
608
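    # Hedged sketch of how a receiver might decode the frames produced above:
    # each frame is a 4-byte big-endian length followed by a pickled dict,
    # which logging.makeLogRecord() turns back into a LogRecord. The 'conn'
    # socket object is an assumption, and a robust receiver would loop until
    # the full frame has arrived.
    #
    #   import pickle, struct, logging
    #
    #   def read_record(conn):
    #       header = conn.recv(4)
    #       (length,) = struct.unpack('>L', header)
    #       payload = conn.recv(length)
    #       return logging.makeLogRecord(pickle.loads(payload))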
609    def handleError(self, record):
610        """
611        Handle an error during logging.
612
613        An error has occurred during logging. Most likely cause -
614        connection lost. Close the socket so that we can retry on the
615        next event.
616        """
617        if self.closeOnError and self.sock:
618            self.sock.close()
619            self.sock = None        #try to reconnect next time
620        else:
621            logging.Handler.handleError(self, record)
622
623    def emit(self, record):
624        """
625        Emit a record.
626
627        Pickles the record and writes it to the socket in binary format.
628        If there is an error with the socket, silently drop the packet.
629        If there was a problem with the socket, re-establishes the
630        socket.
631        """
632        try:
633            s = self.makePickle(record)
634            self.send(s)
635        except Exception:
636            self.handleError(record)
637
638    def close(self):
639        """
640        Closes the socket.
641        """
642        self.acquire()
643        try:
644            sock = self.sock
645            if sock:
646                self.sock = None
647                sock.close()
648            logging.Handler.close(self)
649        finally:
650            self.release()
651
652class DatagramHandler(SocketHandler):
653    """
654    A handler class which writes logging records, in pickle format, to
655    a datagram socket.  The pickle which is sent is that of the LogRecord's
656    attribute dictionary (__dict__), so that the receiver does not need to
657    have the logging module installed in order to process the logging event.
658
659    To unpickle the record at the receiving end into a LogRecord, use the
660    makeLogRecord function.
661
662    """
663    def __init__(self, host, port):
664        """
665        Initializes the handler with a specific host address and port.
666        """
667        SocketHandler.__init__(self, host, port)
668        self.closeOnError = False
669
670    def makeSocket(self):
671        """
672        The factory method of SocketHandler is here overridden to create
673        a UDP socket (SOCK_DGRAM).
674        """
675        if self.port is None:
676            family = socket.AF_UNIX
677        else:
678            family = socket.AF_INET
679        s = socket.socket(family, socket.SOCK_DGRAM)
680        return s
681
682    def send(self, s):
683        """
684        Send a pickled string to a socket.
685
686        This function no longer allows for partial sends which can happen
687        when the network is busy - UDP does not guarantee delivery and
688        can deliver packets out of sequence.
689        """
690        if self.sock is None:
691            self.createSocket()
692        self.sock.sendto(s, self.address)
693
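# Hedged usage sketch; the host/port pair is an assumption and presumes a
# listener on that port:
#
#   handler = DatagramHandler('localhost', DEFAULT_UDP_LOGGING_PORT)
#   logging.getLogger('myapp').addHandler(handler)
#   # Each record is sent as a single UDP datagram; delivery is not guaranteed.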
694class SysLogHandler(logging.Handler):
695    """
696    A handler class which sends formatted logging records to a syslog
697    server. Based on Sam Rushing's syslog module:
698    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
699    Contributed by Nicolas Untz (after which minor refactoring changes
700    have been made).
701    """
702
703    # from <linux/sys/syslog.h>:
704    # ======================================================================
705    # priorities/facilities are encoded into a single 32-bit quantity, where
706    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
707    # facility (0-big number). Both the priorities and the facilities map
708    # roughly one-to-one to strings in the syslogd(8) source code.  This
709    # mapping is included in this file.
710    #
711    # priorities (these are ordered)
712
713    LOG_EMERG     = 0       #  system is unusable
714    LOG_ALERT     = 1       #  action must be taken immediately
715    LOG_CRIT      = 2       #  critical conditions
716    LOG_ERR       = 3       #  error conditions
717    LOG_WARNING   = 4       #  warning conditions
718    LOG_NOTICE    = 5       #  normal but significant condition
719    LOG_INFO      = 6       #  informational
720    LOG_DEBUG     = 7       #  debug-level messages
721
722    #  facility codes
723    LOG_KERN      = 0       #  kernel messages
724    LOG_USER      = 1       #  random user-level messages
725    LOG_MAIL      = 2       #  mail system
726    LOG_DAEMON    = 3       #  system daemons
727    LOG_AUTH      = 4       #  security/authorization messages
728    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
729    LOG_LPR       = 6       #  line printer subsystem
730    LOG_NEWS      = 7       #  network news subsystem
731    LOG_UUCP      = 8       #  UUCP subsystem
732    LOG_CRON      = 9       #  clock daemon
733    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
734    LOG_FTP       = 11      #  FTP daemon
735
736    #  other codes through 15 reserved for system use
737    LOG_LOCAL0    = 16      #  reserved for local use
738    LOG_LOCAL1    = 17      #  reserved for local use
739    LOG_LOCAL2    = 18      #  reserved for local use
740    LOG_LOCAL3    = 19      #  reserved for local use
741    LOG_LOCAL4    = 20      #  reserved for local use
742    LOG_LOCAL5    = 21      #  reserved for local use
743    LOG_LOCAL6    = 22      #  reserved for local use
744    LOG_LOCAL7    = 23      #  reserved for local use
745
746    priority_names = {
747        "alert":    LOG_ALERT,
748        "crit":     LOG_CRIT,
749        "critical": LOG_CRIT,
750        "debug":    LOG_DEBUG,
751        "emerg":    LOG_EMERG,
752        "err":      LOG_ERR,
753        "error":    LOG_ERR,        #  DEPRECATED
754        "info":     LOG_INFO,
755        "notice":   LOG_NOTICE,
756        "panic":    LOG_EMERG,      #  DEPRECATED
757        "warn":     LOG_WARNING,    #  DEPRECATED
758        "warning":  LOG_WARNING,
759        }
760
761    facility_names = {
762        "auth":     LOG_AUTH,
763        "authpriv": LOG_AUTHPRIV,
764        "cron":     LOG_CRON,
765        "daemon":   LOG_DAEMON,
766        "ftp":      LOG_FTP,
767        "kern":     LOG_KERN,
768        "lpr":      LOG_LPR,
769        "mail":     LOG_MAIL,
770        "news":     LOG_NEWS,
771        "security": LOG_AUTH,       #  DEPRECATED
772        "syslog":   LOG_SYSLOG,
773        "user":     LOG_USER,
774        "uucp":     LOG_UUCP,
775        "local0":   LOG_LOCAL0,
776        "local1":   LOG_LOCAL1,
777        "local2":   LOG_LOCAL2,
778        "local3":   LOG_LOCAL3,
779        "local4":   LOG_LOCAL4,
780        "local5":   LOG_LOCAL5,
781        "local6":   LOG_LOCAL6,
782        "local7":   LOG_LOCAL7,
783        }
784
785    #The map below appears to be trivially lowercasing the key. However,
786    #there's more to it than meets the eye - in some locales, lowercasing
787    #gives unexpected results. See SF #1524081: in the Turkish locale,
788    #"INFO".lower() != "info"
789    priority_map = {
790        "DEBUG" : "debug",
791        "INFO" : "info",
792        "WARNING" : "warning",
793        "ERROR" : "error",
794        "CRITICAL" : "critical"
795    }
796
797    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
798                 facility=LOG_USER, socktype=None):
799        """
800        Initialize a handler.
801
802        If address is specified as a string, a UNIX socket is used. To log to a
803        local syslogd, SysLogHandler(address="/dev/log") can be used.
804        If facility is not specified, LOG_USER is used. If socktype is
805        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
806        socket type will be used. For Unix sockets, you can also specify a
807        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
808        back to socket.SOCK_STREAM.
809        """
810        logging.Handler.__init__(self)
811
812        self.address = address
813        self.facility = facility
814        self.socktype = socktype
815
816        if isinstance(address, str):
817            self.unixsocket = True
818            self._connect_unixsocket(address)
819        else:
820            self.unixsocket = False
821            if socktype is None:
822                socktype = socket.SOCK_DGRAM
823            self.socket = socket.socket(socket.AF_INET, socktype)
824            if socktype == socket.SOCK_STREAM:
825                self.socket.connect(address)
826            self.socktype = socktype
827        self.formatter = None
828
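    # Hedged usage sketch; the addresses shown are common conventions taken
    # from the docstring above, not values verified here:
    #
    #   handler = SysLogHandler(address='/dev/log')   # local syslogd on Linux
    #   # or: handler = SysLogHandler(address=('localhost', SYSLOG_UDP_PORT))
    #   handler.setFormatter(logging.Formatter('myapp: %(message)s'))
    #   logging.getLogger('myapp').addHandler(handler)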
829    def _connect_unixsocket(self, address):
830        use_socktype = self.socktype
831        if use_socktype is None:
832            use_socktype = socket.SOCK_DGRAM
833        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
834        try:
835            self.socket.connect(address)
836            # it worked, so set self.socktype to the used type
837            self.socktype = use_socktype
838        except OSError:
839            self.socket.close()
840            if self.socktype is not None:
841                # user didn't specify falling back, so fail
842                raise
843            use_socktype = socket.SOCK_STREAM
844            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
845            try:
846                self.socket.connect(address)
847                # it worked, so set self.socktype to the used type
848                self.socktype = use_socktype
849            except OSError:
850                self.socket.close()
851                raise
852
853    def encodePriority(self, facility, priority):
854        """
855        Encode the facility and priority. You can pass in strings or
856        integers - if strings are passed, the facility_names and
857        priority_names mapping dictionaries are used to convert them to
858        integers.
859        """
860        if isinstance(facility, str):
861            facility = self.facility_names[facility]
862        if isinstance(priority, str):
863            priority = self.priority_names[priority]
864        return (facility << 3) | priority
865
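    # Worked example for encodePriority(): facility LOG_USER (1) and priority
    # LOG_INFO (6) give (1 << 3) | 6 == 14, which emit() renders as '<14>'.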
866    def close(self):
867        """
868        Closes the socket.
869        """
870        self.acquire()
871        try:
872            self.socket.close()
873            logging.Handler.close(self)
874        finally:
875            self.release()
876
877    def mapPriority(self, levelName):
878        """
879        Map a logging level name to a key in the priority_names map.
880        This is useful in two scenarios: when custom levels are being
881        used, and in the case where you can't do a straightforward
882        mapping by lowercasing the logging level name because of locale-
883        specific issues (see SF #1524081).
884        """
885        return self.priority_map.get(levelName, "warning")
886
887    ident = ''          # prepended to all messages
888    append_nul = True   # some old syslog daemons expect a NUL terminator
889
890    def emit(self, record):
891        """
892        Emit a record.
893
894        The record is formatted, and then sent to the syslog server. If
895        exception information is present, it is NOT sent to the server.
896        """
897        try:
898            msg = self.format(record)
899            if self.ident:
900                msg = self.ident + msg
901            if self.append_nul:
902                msg += '\000'
903
904            # We need to convert record level to lowercase, maybe this will
905            # change in the future.
906            prio = '<%d>' % self.encodePriority(self.facility,
907                                                self.mapPriority(record.levelname))
908            prio = prio.encode('utf-8')
909            # Message is a string. Convert to bytes as required by RFC 5424
910            msg = msg.encode('utf-8')
911            msg = prio + msg
912            if self.unixsocket:
913                try:
914                    self.socket.send(msg)
915                except OSError:
916                    self.socket.close()
917                    self._connect_unixsocket(self.address)
918                    self.socket.send(msg)
919            elif self.socktype == socket.SOCK_DGRAM:
920                self.socket.sendto(msg, self.address)
921            else:
922                self.socket.sendall(msg)
923        except Exception:
924            self.handleError(record)
925
926class SMTPHandler(logging.Handler):
927    """
928    A handler class which sends an SMTP email for each logging event.
929    """
930    def __init__(self, mailhost, fromaddr, toaddrs, subject,
931                 credentials=None, secure=None, timeout=5.0):
932        """
933        Initialize the handler.
934
935        Initialize the instance with the from and to addresses and subject
936        line of the email. To specify a non-standard SMTP port, use the
937        (host, port) tuple format for the mailhost argument. To specify
938        authentication credentials, supply a (username, password) tuple
939        for the credentials argument. To specify the use of a secure
940        protocol (TLS), pass in a tuple for the secure argument. This will
941        only be used when authentication credentials are supplied. The tuple
942        will be either an empty tuple, or a single-value tuple with the name
943        of a keyfile, or a 2-value tuple with the names of the keyfile and
944        certificate file. (This tuple is passed to the `starttls` method).
945        A timeout in seconds can be specified for the SMTP connection (the
946        default is 5 seconds).
947        """
948        logging.Handler.__init__(self)
949        if isinstance(mailhost, (list, tuple)):
950            self.mailhost, self.mailport = mailhost
951        else:
952            self.mailhost, self.mailport = mailhost, None
953        if isinstance(credentials, (list, tuple)):
954            self.username, self.password = credentials
955        else:
956            self.username = None
957        self.fromaddr = fromaddr
958        if isinstance(toaddrs, str):
959            toaddrs = [toaddrs]
960        self.toaddrs = toaddrs
961        self.subject = subject
962        self.secure = secure
963        self.timeout = timeout
964
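    # Hedged usage sketch; the host, addresses and credentials are placeholders:
    #
    #   handler = SMTPHandler(mailhost=('smtp.example.com', 587),
    #                         fromaddr='app@example.com',
    #                         toaddrs=['ops@example.com'],
    #                         subject='Application error',
    #                         credentials=('user', 'password'),
    #                         secure=())
    #   handler.setLevel(logging.ERROR)
    #   logging.getLogger('myapp').addHandler(handler)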
965    def getSubject(self, record):
966        """
967        Determine the subject for the email.
968
969        If you want to specify a subject line which is record-dependent,
970        override this method.
971        """
972        return self.subject
973
974    def emit(self, record):
975        """
976        Emit a record.
977
978        Format the record and send it to the specified addressees.
979        """
980        try:
981            import smtplib
982            from email.message import EmailMessage
983            import email.utils
984
985            port = self.mailport
986            if not port:
987                port = smtplib.SMTP_PORT
988            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
989            msg = EmailMessage()
990            msg['From'] = self.fromaddr
991            msg['To'] = ','.join(self.toaddrs)
992            msg['Subject'] = self.getSubject(record)
993            msg['Date'] = email.utils.localtime()
994            msg.set_content(self.format(record))
995            if self.username:
996                if self.secure is not None:
997                    smtp.ehlo()
998                    smtp.starttls(*self.secure)
999                    smtp.ehlo()
1000                smtp.login(self.username, self.password)
1001            smtp.send_message(msg)
1002            smtp.quit()
1003        except Exception:
1004            self.handleError(record)
1005
1006class NTEventLogHandler(logging.Handler):
1007    """
1008    A handler class which sends events to the NT Event Log. Adds a
1009    registry entry for the specified application name. If no dllname is
1010    provided, win32service.pyd (which contains some basic message
1011    placeholders) is used. Note that use of these placeholders will make
1012    your event logs big, as the entire message source is held in the log.
1013    If you want slimmer logs, you have to pass in the name of your own DLL
1014    which contains the message definitions you want to use in the event log.
1015    """
1016    def __init__(self, appname, dllname=None, logtype="Application"):
1017        logging.Handler.__init__(self)
1018        try:
1019            import win32evtlogutil, win32evtlog
1020            self.appname = appname
1021            self._welu = win32evtlogutil
1022            if not dllname:
1023                dllname = os.path.split(self._welu.__file__)
1024                dllname = os.path.split(dllname[0])
1025                dllname = os.path.join(dllname[0], r'win32service.pyd')
1026            self.dllname = dllname
1027            self.logtype = logtype
1028            self._welu.AddSourceToRegistry(appname, dllname, logtype)
1029            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
1030            self.typemap = {
1031                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
1032                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
1033                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
1034                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
1035                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
1036            }
1037        except ImportError:
1038            print("The Python Win32 extensions for NT (service, event "\
1039                        "logging) appear not to be available.")
1040            self._welu = None
1041
1042    def getMessageID(self, record):
1043        """
1044        Return the message ID for the event record. If you are using your
1045        own messages, you could do this by having the msg passed to the
1046        logger being an ID rather than a formatting string. Then, in here,
1047        you could use a dictionary lookup to get the message ID. This
1048        version returns 1, which is the base message ID in win32service.pyd.
1049        """
1050        return 1
1051
1052    def getEventCategory(self, record):
1053        """
1054        Return the event category for the record.
1055
1056        Override this if you want to specify your own categories. This version
1057        returns 0.
1058        """
1059        return 0
1060
1061    def getEventType(self, record):
1062        """
1063        Return the event type for the record.
1064
1065        Override this if you want to specify your own types. This version does
1066        a mapping using the handler's typemap attribute, which is set up in
1067        __init__() to a dictionary which contains mappings for DEBUG, INFO,
1068        WARNING, ERROR and CRITICAL. If you are using your own levels you will
1069        either need to override this method or place a suitable dictionary in
1070        the handler's typemap attribute.
1071        """
1072        return self.typemap.get(record.levelno, self.deftype)
1073
1074    def emit(self, record):
1075        """
1076        Emit a record.
1077
1078        Determine the message ID, event category and event type. Then
1079        log the message in the NT event log.
1080        """
1081        if self._welu:
1082            try:
1083                id = self.getMessageID(record)
1084                cat = self.getEventCategory(record)
1085                type = self.getEventType(record)
1086                msg = self.format(record)
1087                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
1088            except Exception:
1089                self.handleError(record)
1090
1091    def close(self):
1092        """
1093        Clean up this handler.
1094
1095        You can remove the application name from the registry as a
1096        source of event log entries. However, if you do this, you will
1097        not be able to see the events as you intended in the Event Log
1098        Viewer - it needs to be able to access the registry to get the
1099        DLL name.
1100        """
1101        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
1102        logging.Handler.close(self)
1103
1104class HTTPHandler(logging.Handler):
1105    """
1106    A class which sends records to a Web server, using either GET or
1107    POST semantics.
1108    """
1109    def __init__(self, host, url, method="GET", secure=False, credentials=None,
1110                 context=None):
1111        """
1112        Initialize the instance with the host, the request URL, and the method
1113        ("GET" or "POST")
1114        """
1115        logging.Handler.__init__(self)
1116        method = method.upper()
1117        if method not in ["GET", "POST"]:
1118            raise ValueError("method must be GET or POST")
1119        if not secure and context is not None:
1120            raise ValueError("context parameter only makes sense "
1121                             "with secure=True")
1122        self.host = host
1123        self.url = url
1124        self.method = method
1125        self.secure = secure
1126        self.credentials = credentials
1127        self.context = context
1128
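    # Hedged usage sketch; the host and URL are placeholders:
    #
    #   handler = HTTPHandler('logs.example.com:8080', '/log', method='POST')
    #   logging.getLogger('myapp').addHandler(handler)
    #   # Each record's __dict__ is urlencoded and POSTed to the given URL on
    #   # that host.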
1129    def mapLogRecord(self, record):
1130        """
1131        Default implementation of mapping the log record into a dict
1132        that is sent as the CGI data. Override in your class.
1133        Contributed by Franz Glasner.
1134        """
1135        return record.__dict__
1136
1137    def emit(self, record):
1138        """
1139        Emit a record.
1140
1141        Send the record to the Web server as a percent-encoded dictionary
1142        """
1143        try:
1144            import http.client, urllib.parse
1145            host = self.host
1146            if self.secure:
1147                h = http.client.HTTPSConnection(host, context=self.context)
1148            else:
1149                h = http.client.HTTPConnection(host)
1150            url = self.url
1151            data = urllib.parse.urlencode(self.mapLogRecord(record))
1152            if self.method == "GET":
1153                if (url.find('?') >= 0):
1154                    sep = '&'
1155                else:
1156                    sep = '?'
1157                url = url + "%c%s" % (sep, data)
1158            h.putrequest(self.method, url)
1159            # support multiple hosts on one IP address...
1160            # need to strip optional :port from host, if present
1161            i = host.find(":")
1162            if i >= 0:
1163                host = host[:i]
1164            h.putheader("Host", host)
1165            if self.method == "POST":
1166                h.putheader("Content-type",
1167                            "application/x-www-form-urlencoded")
1168                h.putheader("Content-length", str(len(data)))
1169            if self.credentials:
1170                import base64
1171                s = ('%s:%s' % self.credentials).encode('utf-8')
1172                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
1173                h.putheader('Authorization', s)
1174            h.endheaders()
1175            if self.method == "POST":
1176                h.send(data.encode('utf-8'))
1177            h.getresponse()    #can't do anything with the result
1178        except Exception:
1179            self.handleError(record)
1180
1181class BufferingHandler(logging.Handler):
1182    """
1183    A handler class which buffers logging records in memory. Whenever each
1184    record is added to the buffer, a check is made to see if the buffer should
1185    be flushed. If it should, then flush() is expected to do what's needed.
1186    """
1187    def __init__(self, capacity):
1188        """
1189        Initialize the handler with the buffer size.
1190        """
1191        logging.Handler.__init__(self)
1192        self.capacity = capacity
1193        self.buffer = []
1194
1195    def shouldFlush(self, record):
1196        """
1197        Should the handler flush its buffer?
1198
1199        Returns true if the buffer is up to capacity. This method can be
1200        overridden to implement custom flushing strategies.
1201        """
1202        return (len(self.buffer) >= self.capacity)
1203
1204    def emit(self, record):
1205        """
1206        Emit a record.
1207
1208        Append the record. If shouldFlush() tells us to, call flush() to process
1209        the buffer.
1210        """
1211        self.buffer.append(record)
1212        if self.shouldFlush(record):
1213            self.flush()
1214
1215    def flush(self):
1216        """
1217        Override to implement custom flushing behaviour.
1218
1219        This version just zaps the buffer to empty.
1220        """
1221        self.acquire()
1222        try:
1223            self.buffer = []
1224        finally:
1225            self.release()
1226
1227    def close(self):
1228        """
1229        Close the handler.
1230
1231        This version just flushes and chains to the parent class' close().
1232        """
1233        try:
1234            self.flush()
1235        finally:
1236            logging.Handler.close(self)
1237
1238class MemoryHandler(BufferingHandler):
1239    """
1240    A handler class which buffers logging records in memory, periodically
1241    flushing them to a target handler. Flushing occurs whenever the buffer
1242    is full, or when an event of a certain severity or greater is seen.
1243    """
1244    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
1245                 flushOnClose=True):
1246        """
1247        Initialize the handler with the buffer size, the level at which
1248        flushing should occur and an optional target.
1249
1250        Note that without a target being set either here or via setTarget(),
1251        a MemoryHandler is no use to anyone!
1252
1253        The ``flushOnClose`` argument is ``True`` for backward compatibility
1254        reasons - the old behaviour is that when the handler is closed, the
1255        buffer is flushed, even if neither the flush level nor the capacity
1256        has been exceeded. To prevent this, set ``flushOnClose`` to ``False``.
1257        """
1258        BufferingHandler.__init__(self, capacity)
1259        self.flushLevel = flushLevel
1260        self.target = target
1261        # See Issue #26559 for why this has been added
1262        self.flushOnClose = flushOnClose
1263
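    # Hedged usage sketch: buffer records in memory and only write them out
    # when an ERROR (or worse) arrives; the file name is an assumption:
    #
    #   target = logging.FileHandler('app.log')
    #   handler = MemoryHandler(capacity=100, flushLevel=logging.ERROR,
    #                           target=target)
    #   logging.getLogger('myapp').addHandler(handler)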
1264    def shouldFlush(self, record):
1265        """
1266        Check for buffer full or a record at the flushLevel or higher.
1267        """
1268        return (len(self.buffer) >= self.capacity) or \
1269                (record.levelno >= self.flushLevel)
1270
1271    def setTarget(self, target):
1272        """
1273        Set the target handler for this handler.
1274        """
1275        self.target = target
1276
1277    def flush(self):
1278        """
1279        For a MemoryHandler, flushing means just sending the buffered
1280        records to the target, if there is one. Override if you want
1281        different behaviour.
1282
1283        The record buffer is also cleared by this operation.
1284        """
1285        self.acquire()
1286        try:
1287            if self.target:
1288                for record in self.buffer:
1289                    self.target.handle(record)
1290                self.buffer = []
1291        finally:
1292            self.release()
1293
1294    def close(self):
1295        """
1296        Flush, if appropriately configured, set the target to None and lose the
1297        buffer.
1298        """
1299        try:
1300            if self.flushOnClose:
1301                self.flush()
1302        finally:
1303            self.acquire()
1304            try:
1305                self.target = None
1306                BufferingHandler.close(self)
1307            finally:
1308                self.release()
1309
1310
1311class QueueHandler(logging.Handler):
1312    """
1313    This handler sends events to a queue. Typically, it would be used together
1314    with a multiprocessing Queue to centralise logging to file in one process
1315    (in a multi-process application), so as to avoid file write contention
1316    between processes.
1317
1318    This code is new in Python 3.2, but this class can be copy-pasted into
1319    user code for use with earlier Python versions.
1320    """
1321
1322    def __init__(self, queue):
1323        """
1324        Initialise an instance, using the passed queue.
1325        """
1326        logging.Handler.__init__(self)
1327        self.queue = queue
1328
1329    def enqueue(self, record):
1330        """
1331        Enqueue a record.
1332
1333        The base implementation uses put_nowait. You may want to override
1334        this method if you want to use blocking, timeouts or custom queue
1335        implementations.
1336        """
1337        self.queue.put_nowait(record)
1338
1339    def prepare(self, record):
1340        """
1341        Prepares a record for queuing. The object returned by this method is
1342        enqueued.
1343
1344        The base implementation formats the record to merge the message
1345        and arguments, and removes unpickleable items from the record
1346        in-place.
1347
1348        You might want to override this method if you want to convert
1349        the record to a dict or JSON string, or send a modified copy
1350        of the record while leaving the original intact.
1351        """
1352        # The format operation gets traceback text into record.exc_text
1353        # (if there's exception data), and also puts the message into
1354        # record.message. We can then use this to replace the original
1355        # msg + args, as these might be unpickleable. We also zap the
1356        # exc_info attribute, as it's no longer needed and, if not None,
1357        # will typically not be pickleable.
1358        self.format(record)
1359        record.msg = record.message
1360        record.args = None
1361        record.exc_info = None
1362        return record
1363
1364    def emit(self, record):
1365        """
1366        Emit a record.
1367
1368        Writes the LogRecord to the queue, preparing it for pickling first.
1369        """
1370        try:
1371            self.enqueue(self.prepare(record))
1372        except Exception:
1373            self.handleError(record)
1374
1375if threading:
1376    class QueueListener(object):
1377        """
1378        This class implements an internal threaded listener which watches for
1379        LogRecords being added to a queue, removes them and passes them to a
1380        list of handlers for processing.
1381        """
1382        _sentinel = None
1383
1384        def __init__(self, queue, *handlers, respect_handler_level=False):
1385            """
1386            Initialise an instance with the specified queue and
1387            handlers.
1388            """
1389            self.queue = queue
1390            self.handlers = handlers
1391            self._thread = None
1392            self.respect_handler_level = respect_handler_level
1393
1394        def dequeue(self, block):
1395            """
1396            Dequeue a record and return it, optionally blocking.
1397
1398            The base implementation uses get. You may want to override this method
1399            if you want to use timeouts or work with custom queue implementations.
1400            """
1401            return self.queue.get(block)
1402
1403        def start(self):
1404            """
1405            Start the listener.
1406
1407            This starts up a background thread to monitor the queue for
1408            LogRecords to process.
1409            """
1410            self._thread = t = threading.Thread(target=self._monitor)
1411            t.daemon = True
1412            t.start()
1413
1414        def prepare(self, record):
1415            """
1416            Prepare a record for handling.
1417
1418            This method just returns the passed-in record. You may want to
1419            override this method if you need to do any custom marshalling or
1420            manipulation of the record before passing it to the handlers.
1421            """
1422            return record
1423
1424        def handle(self, record):
1425            """
1426            Handle a record.
1427
1428            This just loops through the handlers offering them the record
1429            to handle.
1430            """
1431            record = self.prepare(record)
1432            for handler in self.handlers:
1433                if not self.respect_handler_level:
1434                    process = True
1435                else:
1436                    process = record.levelno >= handler.level
1437                if process:
1438                    handler.handle(record)
1439
1440        def _monitor(self):
1441            """
1442            Monitor the queue for records, and ask the handler
1443            to deal with them.
1444
1445            This method runs on a separate, internal thread.
1446            The thread will terminate if it sees a sentinel object in the queue.
1447            """
1448            q = self.queue
1449            has_task_done = hasattr(q, 'task_done')
1450            while True:
1451                try:
1452                    record = self.dequeue(True)
1453                    if record is self._sentinel:
1454                        break
1455                    self.handle(record)
1456                    if has_task_done:
1457                        q.task_done()
1458                except queue.Empty:
1459                    break
1460
1461        def enqueue_sentinel(self):
1462            """
1463            This is used to enqueue the sentinel record.
1464
1465            The base implementation uses put_nowait. You may want to override this
1466            method if you want to use timeouts or work with custom queue
1467            implementations.
1468            """
1469            self.queue.put_nowait(self._sentinel)
1470
1471        def stop(self):
1472            """
1473            Stop the listener.
1474
1475            This asks the thread to terminate, and then waits for it to do so.
1476            Note that if you don't call this before your application exits, there
1477            may be some records still left on the queue, which won't be processed.
1478            """
1479            self.enqueue_sentinel()
1480            self._thread.join()
1481            self._thread = None
1482
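# Hedged end-to-end sketch of QueueHandler/QueueListener (illustrative only;
# the logger name and handler choice are assumptions):
#
#   import queue
#   q = queue.Queue(-1)
#   logging.getLogger('myapp').addHandler(QueueHandler(q))
#   listener = QueueListener(q, logging.StreamHandler(),
#                            respect_handler_level=True)
#   listener.start()
#   ...
#   listener.stop()   # enqueues the sentinel and joins the worker thread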