1# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved. 2# 3# Permission to use, copy, modify, and distribute this software and its 4# documentation for any purpose and without fee is hereby granted, 5# provided that the above copyright notice appear in all copies and that 6# both that copyright notice and this permission notice appear in 7# supporting documentation, and that the name of Vinay Sajip 8# not be used in advertising or publicity pertaining to distribution 9# of the software without specific, written prior permission. 10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL 12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 17""" 18Additional handlers for the logging package for Python. The core package is 19based on PEP 282 and comments thereto in comp.lang.python. 20 21Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved. 22 23To use, simply 'import logging.handlers' and log away! 24""" 25 26import errno, logging, socket, os, cPickle, struct, time, re 27from stat import ST_DEV, ST_INO, ST_MTIME 28 29try: 30 import codecs 31except ImportError: 32 codecs = None 33try: 34 unicode 35 _unicode = True 36except NameError: 37 _unicode = False 38 39# 40# Some constants... 41# 42 43DEFAULT_TCP_LOGGING_PORT = 9020 44DEFAULT_UDP_LOGGING_PORT = 9021 45DEFAULT_HTTP_LOGGING_PORT = 9022 46DEFAULT_SOAP_LOGGING_PORT = 9023 47SYSLOG_UDP_PORT = 514 48SYSLOG_TCP_PORT = 514 49 50_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day 51 52class BaseRotatingHandler(logging.FileHandler): 53 """ 54 Base class for handlers that rotate log files at a certain point. 55 Not meant to be instantiated directly. 
Instead, use RotatingFileHandler 56 or TimedRotatingFileHandler. 57 """ 58 def __init__(self, filename, mode, encoding=None, delay=0): 59 """ 60 Use the specified filename for streamed logging 61 """ 62 if codecs is None: 63 encoding = None 64 logging.FileHandler.__init__(self, filename, mode, encoding, delay) 65 self.mode = mode 66 self.encoding = encoding 67 68 def emit(self, record): 69 """ 70 Emit a record. 71 72 Output the record to the file, catering for rollover as described 73 in doRollover(). 74 """ 75 try: 76 if self.shouldRollover(record): 77 self.doRollover() 78 logging.FileHandler.emit(self, record) 79 except (KeyboardInterrupt, SystemExit): 80 raise 81 except: 82 self.handleError(record) 83 84class RotatingFileHandler(BaseRotatingHandler): 85 """ 86 Handler for logging to a set of files, which switches from one file 87 to the next when the current file reaches a certain size. 88 """ 89 def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0): 90 """ 91 Open the specified file and use it as the stream for logging. 92 93 By default, the file grows indefinitely. You can specify particular 94 values of maxBytes and backupCount to allow the file to rollover at 95 a predetermined size. 96 97 Rollover occurs whenever the current log file is nearly maxBytes in 98 length. If backupCount is >= 1, the system will successively create 99 new files with the same pathname as the base file, but with extensions 100 ".1", ".2" etc. appended to it. For example, with a backupCount of 5 101 and a base file name of "app.log", you would get "app.log", 102 "app.log.1", "app.log.2", ... through to "app.log.5". The file being 103 written to is always "app.log" - when it gets filled up, it is closed 104 and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. 105 exist, then they are renamed to "app.log.2", "app.log.3" etc. 106 respectively. 107 108 If maxBytes is zero, rollover never occurs. 
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        By default the file grows indefinitely.  Passing non-zero maxBytes
        and backupCount makes the file roll over when it is nearly maxBytes
        long: the active file is always "app.log"; on rollover it becomes
        "app.log.1", existing "app.log.1" becomes "app.log.2", and so on up
        to backupCount, after which the oldest backup is dropped.

        If maxBytes is zero, rollover never occurs.
        """
        # Rotation only makes sense in append mode: honouring 'w' would
        # truncate the accumulated log on every run of the application.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift app.log.1 -> app.log.2, ..., discarding the oldest.
            for idx in range(self.backupCount - 1, 0, -1):
                src = "%s.%d" % (self.baseFilename, idx)
                dst = "%s.%d" % (self.baseFilename, idx + 1)
                if os.path.exists(src):
                    if os.path.exists(dst):
                        os.remove(dst)
                    os.rename(src, dst)
            dst = self.baseFilename + ".1"
            if os.path.exists(dst):
                os.remove(dst)
            # Issue 18940: with delay=True the base file may not exist yet.
            if os.path.exists(self.baseFilename):
                os.rename(self.baseFilename, dst)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            # Windows does not keep the position at EOF for append streams,
            # so seek there explicitly before asking where we are.
            self.stream.seek(0, 2)
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Map the (case-insensitive) 'when' code to the base rollover
        # interval in seconds, the strftime suffix appended to rotated
        # files, and a regex used later to recognise rotated files:
        #   S - seconds, M - minutes, H - hours, D - days,
        #   MIDNIGHT - roll over at midnight,
        #   W{0-6}   - roll over on a certain weekday (0 is Monday).
        if self.when == 'S':
            self.interval = 1                       # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60                      # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60                 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24            # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7        # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval    # multiply by units requested
        # Anchor the schedule to the existing file's mtime so restarts do
        # not push the first rollover forward; otherwise start from now.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # For midnight/weekly schedules the interval length is already
        # known; what must be computed is WHEN the next interval starts.
        # The one-day clock has to begin at the coming midnight, not now,
        # so the first rollover time is fudged accordingly; after that the
        # regular interval takes over.  Leap seconds are ignored.
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            # Seconds remaining between now and the coming midnight.
            secondsToMidnight = _MIDNIGHT - ((t[3] * 60 + t[4]) * 60 + t[5])
            result = currentTime + secondsToMidnight
            # Weekly rollover: add whole days until the requested weekday.
            # If today IS that weekday, nothing to add.  Otherwise count the
            # days forward (wrapping past Sunday when the target day is
            # behind us in the week); the midnight calculation above already
            # accounts for the first partial day.
            if self.when.startswith('W'):
                day = t[6]  # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed
        so the method signatures are the same.
        """
        if int(time.time()) >= self.rolloverAt:
            return 1
        return 0
306 plen = len(prefix) 307 for fileName in fileNames: 308 if fileName[:plen] == prefix: 309 suffix = fileName[plen:] 310 if self.extMatch.match(suffix): 311 result.append(os.path.join(dirName, fileName)) 312 result.sort() 313 if len(result) < self.backupCount: 314 result = [] 315 else: 316 result = result[:len(result) - self.backupCount] 317 return result 318 319 def doRollover(self): 320 """ 321 do a rollover; in this case, a date/time stamp is appended to the filename 322 when the rollover happens. However, you want the file to be named for the 323 start of the interval, not the current time. If there is a backup count, 324 then we have to get a list of matching filenames, sort them and remove 325 the one with the oldest suffix. 326 """ 327 if self.stream: 328 self.stream.close() 329 self.stream = None 330 # get the time that this sequence started at and make it a TimeTuple 331 currentTime = int(time.time()) 332 dstNow = time.localtime(currentTime)[-1] 333 t = self.rolloverAt - self.interval 334 if self.utc: 335 timeTuple = time.gmtime(t) 336 else: 337 timeTuple = time.localtime(t) 338 dstThen = timeTuple[-1] 339 if dstNow != dstThen: 340 if dstNow: 341 addend = 3600 342 else: 343 addend = -3600 344 timeTuple = time.localtime(t + addend) 345 dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple) 346 if os.path.exists(dfn): 347 os.remove(dfn) 348 # Issue 18940: A file may not have been created if delay is True. 349 if os.path.exists(self.baseFilename): 350 os.rename(self.baseFilename, dfn) 351 if self.backupCount > 0: 352 for s in self.getFilesToDelete(): 353 os.remove(s) 354 if not self.delay: 355 self.stream = self._open() 356 newRolloverAt = self.computeRollover(currentTime) 357 while newRolloverAt <= currentTime: 358 newRolloverAt = newRolloverAt + self.interval 359 #If DST changes and midnight or weekly rollover, adjust for this. 
360 if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc: 361 dstAtRollover = time.localtime(newRolloverAt)[-1] 362 if dstNow != dstAtRollover: 363 if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour 364 addend = -3600 365 else: # DST bows out before next rollover, so we need to add an hour 366 addend = 3600 367 newRolloverAt += addend 368 self.rolloverAt = newRolloverAt 369 370class WatchedFileHandler(logging.FileHandler): 371 """ 372 A handler for logging to a file, which watches the file 373 to see if it has changed while in use. This can happen because of 374 usage of programs such as newsyslog and logrotate which perform 375 log file rotation. This handler, intended for use under Unix, 376 watches the file to see if it has changed since the last emit. 377 (A file has changed if its device or inode have changed.) 378 If it has changed, the old file stream is closed, and the file 379 opened to get a new stream. 380 381 This handler is not appropriate for use under Windows, because 382 under Windows open files cannot be moved or renamed - logging 383 opens the files with exclusive locks - and so there is no need 384 for such a handler. Furthermore, ST_INO is not supported under 385 Windows; stat always returns zero for this value. 386 387 This handler is based on a suggestion and patch by Chad J. 388 Schroeder. 389 """ 390 def __init__(self, filename, mode='a', encoding=None, delay=0): 391 logging.FileHandler.__init__(self, filename, mode, encoding, delay) 392 self.dev, self.ino = -1, -1 393 self._statstream() 394 395 def _statstream(self): 396 if self.stream: 397 sres = os.fstat(self.stream.fileno()) 398 self.dev, self.ino = sres[ST_DEV], sres[ST_INO] 399 400 def emit(self, record): 401 """ 402 Emit a record. 403 404 First check if the underlying file has changed, and if it 405 has, close the old stream and reopen the file to get the 406 current stream. 
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file to see if it
    has changed while in use.  This can happen because of usage of programs
    such as newsyslog and logrotate which perform log file rotation.  This
    handler, intended for use under Unix, watches the file to see if it has
    changed since the last emit (a file has changed if its device or inode
    have changed).  If so, the old stream is closed and the file is
    reopened to get a new stream.

    This handler is not appropriate for use under Windows, because under
    Windows open files cannot be moved or renamed - logging opens the files
    with exclusive locks - and so there is no need for such a handler.
    Furthermore, ST_INO is not supported under Windows; stat always returns
    zero for this value.

    This handler is based on a suggestion and patch by Chad J. Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Remember the device/inode identity of the file currently backing
        # our stream, for comparison on the next emit.
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def emit(self, record):
        """
        Emit a record.

        First check if the underlying file has changed, and if it has,
        close the old stream and reopen the file to get the current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except OSError as err:
            if err.errno == errno.ENOENT:
                sres = None
            else:
                raise
        # compare file system stat with that of our stream file handle
        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None  # See Issue #21742: _open () might fail.
                # open a new file handle and get new stat info from that fd
                self.stream = self._open()
                self._statstream()
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket.  The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The socket itself is created lazily, on the first send.  The
        'closeOnError' attribute (0 here) controls handleError(): when
        true, a socket error silently closes the socket so it is reopened
        on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        # Exponential backoff parameters for reconnect attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time.  Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None (first attempt after a disconnect) or
        # enough time has elapsed since the last failed attempt.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None  # next time, no delay before trying
            except socket.error:
                # Creation failed: schedule the next attempt, doubling the
                # wait each time up to retryMax.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock can still be None: either the retry time has not been
        # reached yet, or the reconnect attempt itself failed.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
            record.exc_info = None  # to avoid Unpickleable error
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end.  So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        s = cPickle.dumps(d, 1)
        if ei:
            record.exc_info = ei  # for next handler
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging.  Most likely cause -
        connection lost.  Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
        finally:
            self.release()
        logging.Handler.close(self)

class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
644 """ 645 646 # from <linux/sys/syslog.h>: 647 # ====================================================================== 648 # priorities/facilities are encoded into a single 32-bit quantity, where 649 # the bottom 3 bits are the priority (0-7) and the top 28 bits are the 650 # facility (0-big number). Both the priorities and the facilities map 651 # roughly one-to-one to strings in the syslogd(8) source code. This 652 # mapping is included in this file. 653 # 654 # priorities (these are ordered) 655 656 LOG_EMERG = 0 # system is unusable 657 LOG_ALERT = 1 # action must be taken immediately 658 LOG_CRIT = 2 # critical conditions 659 LOG_ERR = 3 # error conditions 660 LOG_WARNING = 4 # warning conditions 661 LOG_NOTICE = 5 # normal but significant condition 662 LOG_INFO = 6 # informational 663 LOG_DEBUG = 7 # debug-level messages 664 665 # facility codes 666 LOG_KERN = 0 # kernel messages 667 LOG_USER = 1 # random user-level messages 668 LOG_MAIL = 2 # mail system 669 LOG_DAEMON = 3 # system daemons 670 LOG_AUTH = 4 # security/authorization messages 671 LOG_SYSLOG = 5 # messages generated internally by syslogd 672 LOG_LPR = 6 # line printer subsystem 673 LOG_NEWS = 7 # network news subsystem 674 LOG_UUCP = 8 # UUCP subsystem 675 LOG_CRON = 9 # clock daemon 676 LOG_AUTHPRIV = 10 # security/authorization messages (private) 677 LOG_FTP = 11 # FTP daemon 678 679 # other codes through 15 reserved for system use 680 LOG_LOCAL0 = 16 # reserved for local use 681 LOG_LOCAL1 = 17 # reserved for local use 682 LOG_LOCAL2 = 18 # reserved for local use 683 LOG_LOCAL3 = 19 # reserved for local use 684 LOG_LOCAL4 = 20 # reserved for local use 685 LOG_LOCAL5 = 21 # reserved for local use 686 LOG_LOCAL6 = 22 # reserved for local use 687 LOG_LOCAL7 = 23 # reserved for local use 688 689 priority_names = { 690 "alert": LOG_ALERT, 691 "crit": LOG_CRIT, 692 "critical": LOG_CRIT, 693 "debug": LOG_DEBUG, 694 "emerg": LOG_EMERG, 695 "err": LOG_ERR, 696 "error": LOG_ERR, # DEPRECATED 697 
"info": LOG_INFO, 698 "notice": LOG_NOTICE, 699 "panic": LOG_EMERG, # DEPRECATED 700 "warn": LOG_WARNING, # DEPRECATED 701 "warning": LOG_WARNING, 702 } 703 704 facility_names = { 705 "auth": LOG_AUTH, 706 "authpriv": LOG_AUTHPRIV, 707 "cron": LOG_CRON, 708 "daemon": LOG_DAEMON, 709 "ftp": LOG_FTP, 710 "kern": LOG_KERN, 711 "lpr": LOG_LPR, 712 "mail": LOG_MAIL, 713 "news": LOG_NEWS, 714 "security": LOG_AUTH, # DEPRECATED 715 "syslog": LOG_SYSLOG, 716 "user": LOG_USER, 717 "uucp": LOG_UUCP, 718 "local0": LOG_LOCAL0, 719 "local1": LOG_LOCAL1, 720 "local2": LOG_LOCAL2, 721 "local3": LOG_LOCAL3, 722 "local4": LOG_LOCAL4, 723 "local5": LOG_LOCAL5, 724 "local6": LOG_LOCAL6, 725 "local7": LOG_LOCAL7, 726 } 727 728 #The map below appears to be trivially lowercasing the key. However, 729 #there's more to it than meets the eye - in some locales, lowercasing 730 #gives unexpected results. See SF #1524081: in the Turkish locale, 731 #"INFO".lower() != "info" 732 priority_map = { 733 "DEBUG" : "debug", 734 "INFO" : "info", 735 "WARNING" : "warning", 736 "ERROR" : "error", 737 "CRITICAL" : "critical" 738 } 739 740 def __init__(self, address=('localhost', SYSLOG_UDP_PORT), 741 facility=LOG_USER, socktype=None): 742 """ 743 Initialize a handler. 744 745 If address is specified as a string, a UNIX socket is used. To log to a 746 local syslogd, "SysLogHandler(address="/dev/log")" can be used. 747 If facility is not specified, LOG_USER is used. If socktype is 748 specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific 749 socket type will be used. For Unix sockets, you can also specify a 750 socktype of None, in which case socket.SOCK_DGRAM will be used, falling 751 back to socket.SOCK_STREAM. 
752 """ 753 logging.Handler.__init__(self) 754 755 self.address = address 756 self.facility = facility 757 self.socktype = socktype 758 759 if isinstance(address, basestring): 760 self.unixsocket = 1 761 self._connect_unixsocket(address) 762 else: 763 self.unixsocket = 0 764 if socktype is None: 765 socktype = socket.SOCK_DGRAM 766 self.socket = socket.socket(socket.AF_INET, socktype) 767 if socktype == socket.SOCK_STREAM: 768 self.socket.connect(address) 769 self.socktype = socktype 770 self.formatter = None 771 772 def _connect_unixsocket(self, address): 773 use_socktype = self.socktype 774 if use_socktype is None: 775 use_socktype = socket.SOCK_DGRAM 776 self.socket = socket.socket(socket.AF_UNIX, use_socktype) 777 try: 778 self.socket.connect(address) 779 # it worked, so set self.socktype to the used type 780 self.socktype = use_socktype 781 except socket.error: 782 self.socket.close() 783 if self.socktype is not None: 784 # user didn't specify falling back, so fail 785 raise 786 use_socktype = socket.SOCK_STREAM 787 self.socket = socket.socket(socket.AF_UNIX, use_socktype) 788 try: 789 self.socket.connect(address) 790 # it worked, so set self.socktype to the used type 791 self.socktype = use_socktype 792 except socket.error: 793 self.socket.close() 794 raise 795 796 # curious: when talking to the unix-domain '/dev/log' socket, a 797 # zero-terminator seems to be required. this string is placed 798 # into a class variable so that it can be overridden if 799 # necessary. 800 log_format_string = '<%d>%s\000' 801 802 def encodePriority(self, facility, priority): 803 """ 804 Encode the facility and priority. You can pass in strings or 805 integers - if strings are passed, the facility_names and 806 priority_names mapping dictionaries are used to convert them to 807 integers. 
808 """ 809 if isinstance(facility, basestring): 810 facility = self.facility_names[facility] 811 if isinstance(priority, basestring): 812 priority = self.priority_names[priority] 813 return (facility << 3) | priority 814 815 def close (self): 816 """ 817 Closes the socket. 818 """ 819 self.acquire() 820 try: 821 if self.unixsocket: 822 self.socket.close() 823 finally: 824 self.release() 825 logging.Handler.close(self) 826 827 def mapPriority(self, levelName): 828 """ 829 Map a logging level name to a key in the priority_names map. 830 This is useful in two scenarios: when custom levels are being 831 used, and in the case where you can't do a straightforward 832 mapping by lowercasing the logging level name because of locale- 833 specific issues (see SF #1524081). 834 """ 835 return self.priority_map.get(levelName, "warning") 836 837 def emit(self, record): 838 """ 839 Emit a record. 840 841 The record is formatted, and then sent to the syslog server. If 842 exception information is present, it is NOT sent to the server. 843 """ 844 try: 845 msg = self.format(record) + '\000' 846 """ 847 We need to convert record level to lowercase, maybe this will 848 change in the future. 849 """ 850 prio = '<%d>' % self.encodePriority(self.facility, 851 self.mapPriority(record.levelname)) 852 # Message is a string. Convert to bytes as required by RFC 5424 853 if type(msg) is unicode: 854 msg = msg.encode('utf-8') 855 msg = prio + msg 856 if self.unixsocket: 857 try: 858 self.socket.send(msg) 859 except socket.error: 860 self.socket.close() # See issue 17981 861 self._connect_unixsocket(self.address) 862 self.socket.send(msg) 863 elif self.socktype == socket.SOCK_DGRAM: 864 self.socket.sendto(msg, self.address) 865 else: 866 self.socket.sendall(msg) 867 except (KeyboardInterrupt, SystemExit): 868 raise 869 except: 870 self.handleError(record) 871 872class SMTPHandler(logging.Handler): 873 """ 874 A handler class which sends an SMTP email for each logging event. 
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email.  To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument.  To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument.  To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument - used only
        when credentials are supplied - which is either empty, a 1-tuple
        with a keyfile name, or a 2-tuple of keyfile and certificate file
        names (this tuple is passed to the `starttls` method).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        # A single recipient may be given as a bare string.
        if isinstance(toaddrs, basestring):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self._timeout = 5.0

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            ",".join(self.toaddrs),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log.  Adds a
    registry entry for the specified application name.  If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used.  Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
959 """ 960 def __init__(self, appname, dllname=None, logtype="Application"): 961 logging.Handler.__init__(self) 962 try: 963 import win32evtlogutil, win32evtlog 964 self.appname = appname 965 self._welu = win32evtlogutil 966 if not dllname: 967 dllname = os.path.split(self._welu.__file__) 968 dllname = os.path.split(dllname[0]) 969 dllname = os.path.join(dllname[0], r'win32service.pyd') 970 self.dllname = dllname 971 self.logtype = logtype 972 self._welu.AddSourceToRegistry(appname, dllname, logtype) 973 self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE 974 self.typemap = { 975 logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, 976 logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, 977 logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, 978 logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, 979 logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, 980 } 981 except ImportError: 982 print("The Python Win32 extensions for NT (service, event "\ 983 "logging) appear not to be available.") 984 self._welu = None 985 986 def getMessageID(self, record): 987 """ 988 Return the message ID for the event record. If you are using your 989 own messages, you could do this by having the msg passed to the 990 logger being an ID rather than a formatting string. Then, in here, 991 you could use a dictionary lookup to get the message ID. This 992 version returns 1, which is the base message ID in win32service.pyd. 993 """ 994 return 1 995 996 def getEventCategory(self, record): 997 """ 998 Return the event category for the record. 999 1000 Override this if you want to specify your own categories. This version 1001 returns 0. 1002 """ 1003 return 0 1004 1005 def getEventType(self, record): 1006 """ 1007 Return the event type for the record. 1008 1009 Override this if you want to specify your own types. 
This version does 1010 a mapping using the handler's typemap attribute, which is set up in 1011 __init__() to a dictionary which contains mappings for DEBUG, INFO, 1012 WARNING, ERROR and CRITICAL. If you are using your own levels you will 1013 either need to override this method or place a suitable dictionary in 1014 the handler's typemap attribute. 1015 """ 1016 return self.typemap.get(record.levelno, self.deftype) 1017 1018 def emit(self, record): 1019 """ 1020 Emit a record. 1021 1022 Determine the message ID, event category and event type. Then 1023 log the message in the NT event log. 1024 """ 1025 if self._welu: 1026 try: 1027 id = self.getMessageID(record) 1028 cat = self.getEventCategory(record) 1029 type = self.getEventType(record) 1030 msg = self.format(record) 1031 self._welu.ReportEvent(self.appname, id, cat, type, [msg]) 1032 except (KeyboardInterrupt, SystemExit): 1033 raise 1034 except: 1035 self.handleError(record) 1036 1037 def close(self): 1038 """ 1039 Clean up this handler. 1040 1041 You can remove the application name from the registry as a 1042 source of event log entries. However, if you do this, you will 1043 not be able to see the events as you intended in the Event Log 1044 Viewer - it needs to be able to access the registry to get the 1045 DLL name. 1046 """ 1047 #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) 1048 logging.Handler.close(self) 1049 1050class HTTPHandler(logging.Handler): 1051 """ 1052 A class which sends records to a Web server, using either GET or 1053 POST semantics. 
1054 """ 1055 def __init__(self, host, url, method="GET"): 1056 """ 1057 Initialize the instance with the host, the request URL, and the method 1058 ("GET" or "POST") 1059 """ 1060 logging.Handler.__init__(self) 1061 method = method.upper() 1062 if method not in ["GET", "POST"]: 1063 raise ValueError("method must be GET or POST") 1064 self.host = host 1065 self.url = url 1066 self.method = method 1067 1068 def mapLogRecord(self, record): 1069 """ 1070 Default implementation of mapping the log record into a dict 1071 that is sent as the CGI data. Overwrite in your class. 1072 Contributed by Franz Glasner. 1073 """ 1074 return record.__dict__ 1075 1076 def emit(self, record): 1077 """ 1078 Emit a record. 1079 1080 Send the record to the Web server as a percent-encoded dictionary 1081 """ 1082 try: 1083 import httplib, urllib 1084 host = self.host 1085 h = httplib.HTTP(host) 1086 url = self.url 1087 data = urllib.urlencode(self.mapLogRecord(record)) 1088 if self.method == "GET": 1089 if (url.find('?') >= 0): 1090 sep = '&' 1091 else: 1092 sep = '?' 1093 url = url + "%c%s" % (sep, data) 1094 h.putrequest(self.method, url) 1095 # support multiple hosts on one IP address... 1096 # need to strip optional :port from host, if present 1097 i = host.find(":") 1098 if i >= 0: 1099 host = host[:i] 1100 h.putheader("Host", host) 1101 if self.method == "POST": 1102 h.putheader("Content-type", 1103 "application/x-www-form-urlencoded") 1104 h.putheader("Content-length", str(len(data))) 1105 h.endheaders(data if self.method == "POST" else None) 1106 h.getreply() #can't do anything with the result 1107 except (KeyboardInterrupt, SystemExit): 1108 raise 1109 except: 1110 self.handleError(record) 1111 1112class BufferingHandler(logging.Handler): 1113 """ 1114 A handler class which buffers logging records in memory. Whenever each 1115 record is added to the buffer, a check is made to see if the buffer should 1116 be flushed. If it should, then flush() is expected to do what's needed. 
1117 """ 1118 def __init__(self, capacity): 1119 """ 1120 Initialize the handler with the buffer size. 1121 """ 1122 logging.Handler.__init__(self) 1123 self.capacity = capacity 1124 self.buffer = [] 1125 1126 def shouldFlush(self, record): 1127 """ 1128 Should the handler flush its buffer? 1129 1130 Returns true if the buffer is up to capacity. This method can be 1131 overridden to implement custom flushing strategies. 1132 """ 1133 return (len(self.buffer) >= self.capacity) 1134 1135 def emit(self, record): 1136 """ 1137 Emit a record. 1138 1139 Append the record. If shouldFlush() tells us to, call flush() to process 1140 the buffer. 1141 """ 1142 self.buffer.append(record) 1143 if self.shouldFlush(record): 1144 self.flush() 1145 1146 def flush(self): 1147 """ 1148 Override to implement custom flushing behaviour. 1149 1150 This version just zaps the buffer to empty. 1151 """ 1152 self.acquire() 1153 try: 1154 self.buffer = [] 1155 finally: 1156 self.release() 1157 1158 def close(self): 1159 """ 1160 Close the handler. 1161 1162 This version just flushes and chains to the parent class' close(). 1163 """ 1164 try: 1165 self.flush() 1166 finally: 1167 logging.Handler.close(self) 1168 1169class MemoryHandler(BufferingHandler): 1170 """ 1171 A handler class which buffers logging records in memory, periodically 1172 flushing them to a target handler. Flushing occurs whenever the buffer 1173 is full, or when an event of a certain severity or greater is seen. 1174 """ 1175 def __init__(self, capacity, flushLevel=logging.ERROR, target=None): 1176 """ 1177 Initialize the handler with the buffer size, the level at which 1178 flushing should occur and an optional target. 1179 1180 Note that without a target being set either here or via setTarget(), 1181 a MemoryHandler is no use to anyone! 
1182 """ 1183 BufferingHandler.__init__(self, capacity) 1184 self.flushLevel = flushLevel 1185 self.target = target 1186 1187 def shouldFlush(self, record): 1188 """ 1189 Check for buffer full or a record at the flushLevel or higher. 1190 """ 1191 return (len(self.buffer) >= self.capacity) or \ 1192 (record.levelno >= self.flushLevel) 1193 1194 def setTarget(self, target): 1195 """ 1196 Set the target handler for this handler. 1197 """ 1198 self.target = target 1199 1200 def flush(self): 1201 """ 1202 For a MemoryHandler, flushing means just sending the buffered 1203 records to the target, if there is one. Override if you want 1204 different behaviour. 1205 """ 1206 self.acquire() 1207 try: 1208 if self.target: 1209 for record in self.buffer: 1210 self.target.handle(record) 1211 self.buffer = [] 1212 finally: 1213 self.release() 1214 1215 def close(self): 1216 """ 1217 Flush, set the target to None and lose the buffer. 1218 """ 1219 try: 1220 self.flush() 1221 finally: 1222 self.acquire() 1223 try: 1224 self.target = None 1225 BufferingHandler.close(self) 1226 finally: 1227 self.release() 1228