# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.

Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging.handlers' and log away!
"""

import errno, logging, socket, os, cPickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME

try:
    import codecs
except ImportError:
    codecs = None
try:
    unicode
    _unicode = True
except NameError:
    _unicode = False

#
# Some constants...
#

DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day

class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly.  Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging
        """
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    #print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            # Issue 18940: A file may not have been created if delay is True.
            if os.path.exists(self.baseFilename):
                os.rename(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0

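# Illustrative usage sketch (not part of the original module): wiring a
# RotatingFileHandler so that "app.log" rolls over at roughly 1 MB and keeps
# five numbered backups, as described in the class docstring above. The
# logger name and file path are arbitrary examples.
def _example_rotating_usage():
    logger = logging.getLogger("example.rotating")
    handler = RotatingFileHandler("app.log", maxBytes=1024 * 1024,
                                  backupCount=5)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    # After enough output, the directory holds app.log, app.log.1 ... app.log.5.
    for i in range(10000):
        logger.info("message %d", i)
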
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                    currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        # Issue 18940: A file may not have been created if delay is True.
        if os.path.exists(self.baseFilename):
            os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt

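# Illustrative usage sketch (not part of the original module): rotating at
# midnight and keeping a week of dated backups. The file name is an arbitrary
# example; the handler appends a "%Y-%m-%d" suffix on each rollover as set up
# in __init__ above.
def _example_timed_rotating_usage():
    logger = logging.getLogger("example.timed")
    handler = TimedRotatingFileHandler("app.log", when="midnight",
                                       backupCount=7)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info("rotated nightly; old files look like app.log.2013-01-31")
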
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def emit(self, record):
        """
        Emit a record.

        First check if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except OSError as err:
            if err.errno == errno.ENOENT:
                sres = None
            else:
                raise
        # compare file system stat with that of our stream file handle
        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None  # See Issue #21742: _open () might fail.
                # open a new file handle and get new stat info from that fd
                self.stream = self._open()
                self._statstream()
        logging.FileHandler.emit(self, record)

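# Illustrative usage sketch (not part of the original module): attaching a
# WatchedFileHandler so that an external rotation tool such as logrotate can
# move the file out from under the process; the handler reopens the path on
# the next emit. The path is an arbitrary example.
def _example_watched_usage():
    logger = logging.getLogger("example.watched")
    handler = WatchedFileHandler("/var/log/myapp/app.log")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info("safe to log across an external logrotate run")
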
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' is set to 1 - which means that if
        a socket error occurs, the socket is silently closed and then
        reopened on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except socket.error:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
            record.exc_info = None  # to avoid Unpickleable error
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        s = cPickle.dumps(d, 1)
        if ei:
            record.exc_info = ei  # for next handler
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
        finally:
            self.release()
        logging.Handler.close(self)

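# Illustrative receiving-end sketch (not part of the original module): a
# minimal blocking server that reverses makePickle() above - it reads the
# 4-byte big-endian length prefix, unpickles the attribute dict and rebuilds
# a LogRecord with logging.makeLogRecord. Port 9020 matches
# DEFAULT_TCP_LOGGING_PORT; error handling is omitted for brevity.
def _example_socket_receiver(host='localhost', port=DEFAULT_TCP_LOGGING_PORT):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((host, port))
    server.listen(1)
    conn, _ = server.accept()
    while True:
        header = conn.recv(4)
        if len(header) < 4:
            break  # peer closed the connection
        slen = struct.unpack(">L", header)[0]
        data = conn.recv(slen)
        while len(data) < slen:
            data += conn.recv(slen - len(data))
        record = logging.makeLogRecord(cPickle.loads(data))
        logging.getLogger(record.name).handle(record)
    conn.close()
    server.close()
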
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))

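# Illustrative usage sketch (not part of the original module): the UDP
# counterpart of SocketHandler, paired here with DEFAULT_UDP_LOGGING_PORT.
# Delivery is not guaranteed, which may be acceptable for low-value diagnostics.
def _example_datagram_usage():
    logger = logging.getLogger("example.datagram")
    handler = DatagramHandler("localhost", DEFAULT_UDP_LOGGING_PORT)
    logger.addHandler(handler)
    logger.info("sent as a single UDP datagram")
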
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    # The map below appears to be trivially lowercasing the key. However,
    # there's more to it than meets the eye - in some locales, lowercasing
    # gives unexpected results. See SF #1524081: in the Turkish locale,
    # "INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, basestring):
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise socket.error("getaddrinfo returns an empty list")
            for res in ress:
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        sock.connect(sa)
                    break
                except socket.error as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            self.socket = sock
            self.socktype = socktype

    def _connect_unixsocket(self, address):
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except socket.error:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except socket.error:
                self.socket.close()
                raise

    # curious: when talking to the unix-domain '/dev/log' socket, a
    #   zero-terminator seems to be required.  this string is placed
    #   into a class variable so that it can be overridden if
    #   necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, basestring):
            facility = self.facility_names[facility]
        if isinstance(priority, basestring):
            priority = self.priority_names[priority]
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            if self.unixsocket:
                self.socket.close()
        finally:
            self.release()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record) + '\000'
            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            # Message is a string. Convert to bytes as required by RFC 5424
            if type(msg) is unicode:
                msg = msg.encode('utf-8')
            msg = prio + msg
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    self.socket.close() # See issue 17981
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

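# Illustrative usage sketch (not part of the original module): sending
# records to the local syslog daemon over the /dev/log Unix socket with the
# LOG_LOCAL0 facility. As encodePriority() above shows, a WARNING record on
# local0 is tagged <132>, i.e. (16 << 3) | 4. The logger name is an
# arbitrary example.
def _example_syslog_usage():
    logger = logging.getLogger("example.syslog")
    handler = SysLogHandler(address="/dev/log",
                            facility=SysLogHandler.LOG_LOCAL0)
    logger.addHandler(handler)
    logger.warning("disk space low")  # arrives as a <132> syslog message
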
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, basestring):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self._timeout = 5.0

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            ",".join(self.toaddrs),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

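# Illustrative usage sketch (not part of the original module): emailing only
# ERROR and above. The host names and addresses are placeholders, not real
# servers or mailboxes.
def _example_smtp_usage():
    logger = logging.getLogger("example.smtp")
    handler = SMTPHandler(mailhost=("smtp.example.com", 587),
                          fromaddr="app@example.com",
                          toaddrs=["ops@example.com"],
                          subject="Application error",
                          credentials=("app", "secret"),
                          secure=())
    handler.setLevel(logging.ERROR)
    logger.addHandler(handler)
    logger.error("something went wrong")
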
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)

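# Illustrative usage sketch (not part of the original module): registering
# an application name with the Windows event log. Only meaningful where the
# Python Win32 extensions are installed; the application name is an
# arbitrary example.
def _example_nteventlog_usage():
    logger = logging.getLogger("example.nteventlog")
    handler = NTEventLogHandler("MyApplication")
    logger.addHandler(handler)
    logger.error("recorded in the Application event log as an error event")
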
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET"):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import httplib, urllib
            host = self.host
            h = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            h.endheaders(data if self.method == "POST" else None)
            h.getreply()    #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

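# Illustrative usage sketch (not part of the original module): POSTing each
# record's attribute dict to a collection endpoint. The host and URL are
# placeholders; mapLogRecord() above controls exactly which fields are sent.
def _example_http_usage():
    logger = logging.getLogger("example.http")
    handler = HTTPHandler("logs.example.com:8080", "/collect", method="POST")
    logger.addHandler(handler)
    logger.info("delivered as application/x-www-form-urlencoded data")
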
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return (len(self.buffer) >= self.capacity)

    def emit(self, record):
        """
        Emit a record.

        Append the record. If shouldFlush() tells us to, call flush() to process
        the buffer.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.acquire()
        try:
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)

class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        return (len(self.buffer) >= self.capacity) or \
                (record.levelno >= self.flushLevel)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.
        """
        self.acquire()
        try:
            if self.target:
                for record in self.buffer:
                    self.target.handle(record)
                self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush, set the target to None and lose the buffer.
        """
        try:
            self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()

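# Illustrative usage sketch (not part of the original module): buffering up
# to 100 records in memory and only writing them to a file once an ERROR (or
# worse) arrives, via the flushLevel/target mechanism described above. The
# file name is an arbitrary example.
def _example_memory_usage():
    logger = logging.getLogger("example.memory")
    target = logging.FileHandler("errors.log")
    handler = MemoryHandler(capacity=100, flushLevel=logging.ERROR,
                            target=target)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    logger.debug("buffered, not yet written")
    logger.error("triggers a flush; the buffered DEBUG record is written too")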