# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Abseil Python logging module implemented on top of standard logging.

Simple usage::

    from absl import logging

    logging.info('Interesting Stuff')
    logging.info('Interesting Stuff with Arguments: %d', 42)

    logging.set_verbosity(logging.INFO)
    logging.log(logging.DEBUG, 'This will *not* be printed')
    logging.set_verbosity(logging.DEBUG)
    logging.log(logging.DEBUG, 'This will be printed')

    logging.warning('Worrying Stuff')
    logging.error('Alarming Stuff')
    logging.fatal('AAAAHHHHH!!!!')  # Process exits.

Usage note: Do not pre-format the strings in your program code.
Instead, let the logging module perform argument interpolation.
This saves cycles because strings that don't need to be printed
are never formatted.  Note that this module does not attempt to
interpolate arguments when no arguments are given.  In other words::

    logging.info('Interesting Stuff: %s')

does not raise an exception because logging.info() has only one
argument, the message string.

"Lazy" evaluation for debugging
-------------------------------

If you do something like this::

    logging.debug('Thing: %s', thing.ExpensiveOp())

then the ExpensiveOp will be evaluated even if nothing
is printed to the log. To avoid this, use the level_debug() function::

    if logging.level_debug():
      logging.debug('Thing: %s', thing.ExpensiveOp())

Per-file level logging is supported by logging.vlog() and
logging.vlog_is_on(). For example::

    if logging.vlog_is_on(2):
      logging.vlog(2, very_expensive_debug_message())

Notes on Unicode
----------------

The log output is encoded as UTF-8.  Don't pass data in other encodings in
bytes() instances -- instead pass unicode string instances when you need to
(for both the format string and arguments).

Note on critical and fatal
--------------------------

The standard logging module defines fatal as an alias of critical, but it is
not documented, and it does NOT actually terminate the program. This module
only defines fatal (not critical), and it DOES terminate the program.

The differences in behavior are historical and unfortunate.
"""

import collections
from collections import abc
import getpass
import io
import itertools
import logging
import os
import socket
import struct
import sys
import threading
import time
import timeit
import traceback
import types
import warnings

from absl import flags
from absl.logging import converter

try:
  from typing import NoReturn
except ImportError:
  pass


FLAGS = flags.FLAGS


# Logging levels.
FATAL = converter.ABSL_FATAL
ERROR = converter.ABSL_ERROR
WARNING = converter.ABSL_WARNING
WARN = converter.ABSL_WARNING  # Deprecated name.
INFO = converter.ABSL_INFO
DEBUG = converter.ABSL_DEBUG

# Regex to match/parse log line prefixes.
ABSL_LOGGING_PREFIX_REGEX = (
    r'^(?P<severity>[IWEF])'
    r'(?P<month>\d\d)(?P<day>\d\d) '
    r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)'
    r'\.(?P<microsecond>\d\d\d\d\d\d) +'
    r'(?P<thread_id>-?\d+) '
    r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')
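# For reference, a prefix matched by the regex above looks like this
# (illustrative values):
#   I0628 14:21:59.123456 140735207101248 main.py:27
# i.e. severity initial, MMDD, HH:MM:SS.microseconds, thread id, file:line.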


# Mask to convert integer thread ids to unsigned quantities for logging purposes
_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1

# Extra property set on the LogRecord created by ABSLLogger when its level is
# CRITICAL/FATAL.
_ABSL_LOG_FATAL = '_absl_log_fatal'
# Extra prefix added to the log message when a non-absl logger logs a
# CRITICAL/FATAL message.
_CRITICAL_PREFIX = 'CRITICAL - '

# Used by findCaller to skip callers from */logging/__init__.py.
_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')

# The ABSL logger instance, initialized in _initialize().
_absl_logger = None
# The ABSL handler instance, initialized in _initialize().
_absl_handler = None


_CPP_NAME_TO_LEVELS = {
    'debug': '0',  # Abseil C++ has no DEBUG level, mapping it to INFO here.
    'info': '0',
    'warning': '1',
    'warn': '1',
    'error': '2',
    'fatal': '3'
}

_CPP_LEVEL_TO_NAMES = {
    '0': 'info',
    '1': 'warning',
    '2': 'error',
    '3': 'fatal',
}


class _VerbosityFlag(flags.Flag):
  """Flag class for -v/--verbosity."""

  def __init__(self, *args, **kwargs):
    super(_VerbosityFlag, self).__init__(
        flags.IntegerParser(),
        flags.ArgumentSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, v):
    self._value = v
    self._update_logging_levels()

  def _update_logging_levels(self):
    """Updates absl logging levels to the current verbosity.

    Visibility: module-private
    """
    if not _absl_logger:
      return

    if self._value <= converter.ABSL_DEBUG:
      standard_verbosity = converter.absl_to_standard(self._value)
    else:
      # --verbosity is set to higher than 1 for vlog.
      standard_verbosity = logging.DEBUG - (self._value - 1)

    # Also update root level when absl_handler is used.
    if _absl_handler in logging.root.handlers:
      # Make absl logger inherit from the root logger. absl logger might have
      # a non-NOTSET value if logging.set_verbosity() is called at import time.
      _absl_logger.setLevel(logging.NOTSET)
      logging.root.setLevel(standard_verbosity)
    else:
      _absl_logger.setLevel(standard_verbosity)


class _LoggerLevelsFlag(flags.Flag):
  """Flag class for --logger_levels."""

  def __init__(self, *args, **kwargs):
    super(_LoggerLevelsFlag, self).__init__(
        _LoggerLevelsParser(),
        _LoggerLevelsSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    # For lack of an immutable type, be defensive and return a copy.
    # Modifications to the dict aren't supported and won't have any effect.
    # While Py3 could use MappingProxyType, that isn't deepcopy friendly, so
    # just return a copy.
    return self._value.copy()

  @value.setter
  def value(self, v):
    self._value = {} if v is None else v
    self._update_logger_levels()

  def _update_logger_levels(self):
    # Visibility: module-private.
    # This is called by absl.app.run() during initialization.
    for name, level in self._value.items():
      logging.getLogger(name).setLevel(level)


class _LoggerLevelsParser(flags.ArgumentParser):
  """Parser for --logger_levels flag."""

  def parse(self, value):
    if isinstance(value, abc.Mapping):
      return value

    pairs = [pair.strip() for pair in value.split(',') if pair.strip()]

    # Preserve the order so that serialization is deterministic.
    levels = collections.OrderedDict()
    for name_level in pairs:
      name, level = name_level.split(':', 1)
      name = name.strip()
      level = level.strip()
      levels[name] = level
    return levels


class _LoggerLevelsSerializer(object):
  """Serializer for --logger_levels flag."""

  def serialize(self, value):
    if isinstance(value, str):
      return value
    return ','.join(
        '{}:{}'.format(name, level) for name, level in value.items())


class _StderrthresholdFlag(flags.Flag):
  """Flag class for --stderrthreshold."""

  def __init__(self, *args, **kwargs):
    super(_StderrthresholdFlag, self).__init__(
        flags.ArgumentParser(),
        flags.ArgumentSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, v):
    if v in _CPP_LEVEL_TO_NAMES:
      # --stderrthreshold also accepts numeric strings whose values are
      # Abseil C++ log levels.
      cpp_value = int(v)
      v = _CPP_LEVEL_TO_NAMES[v]  # Normalize to strings.
    elif v.lower() in _CPP_NAME_TO_LEVELS:
      v = v.lower()
      if v == 'warn':
        v = 'warning'  # Use 'warning' as the canonical name.
      cpp_value = int(_CPP_NAME_TO_LEVELS[v])
    else:
      raise ValueError(
          '--stderrthreshold must be one of (case-insensitive) '
          "'debug', 'info', 'warning', 'error', 'fatal', "
          "or '0', '1', '2', '3', not '%s'" % v)

    self._value = v


flags.DEFINE_boolean('logtostderr',
                     False,
                     'Should only log to stderr?', allow_override_cpp=True)
flags.DEFINE_boolean('alsologtostderr',
                     False,
                     'also log to stderr?', allow_override_cpp=True)
flags.DEFINE_string('log_dir',
                    os.getenv('TEST_TMPDIR', ''),
                    'directory to write logfiles into',
                    allow_override_cpp=True)
flags.DEFINE_flag(_VerbosityFlag(
    'verbosity', -1,
    'Logging verbosity level. Messages logged at this level or lower will '
    'be included. Set to 1 for debug logging. If the flag was not set or '
    'supplied, the value will be changed from the default of -1 (warning) to '
    '0 (info) after flags are parsed.',
    short_name='v', allow_hide_cpp=True))
flags.DEFINE_flag(
    _LoggerLevelsFlag(
        'logger_levels', {},
        'Specify log level of loggers. The format is a CSV list of '
        '`name:level`, where `name` is the logger name used with '
        '`logging.getLogger()`, and `level` is a level name (INFO, DEBUG, '
        'etc). e.g. `myapp.foo:INFO,other.logger:DEBUG`'))
flags.DEFINE_flag(_StderrthresholdFlag(
    'stderrthreshold', 'fatal',
    'log messages at this level, or more severe, to stderr in '
    'addition to the logfile.  Possible values are '
    "'debug', 'info', 'warning', 'error', and 'fatal'.  "
    'Obsoletes --alsologtostderr. Using --alsologtostderr '
    'cancels the effect of this flag. Please also note that '
    'this flag is subject to --verbosity and requires the logfile '
    'not to be stderr.', allow_hide_cpp=True))
flags.DEFINE_boolean('showprefixforinfo', True,
                     'If False, do not prepend prefix to info messages '
                     'when they are logged to stderr, '
                     '--verbosity is set to INFO level, '
                     'and python logging is used.')


def get_verbosity():
  """Returns the logging verbosity."""
  return FLAGS['verbosity'].value


def set_verbosity(v):
  """Sets the logging verbosity.

  Causes all messages of level <= v to be logged,
  and all messages of level > v to be silently discarded.

  Args:
    v: int|str, the verbosity level as an integer or string. Legal string values
        are those that can be coerced to an integer as well as case-insensitive
        'debug', 'info', 'warning', 'error', and 'fatal'.
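
  Example (illustrative calls; accepted values follow the Args description
  above)::

    logging.set_verbosity(logging.DEBUG)  # by absl level constant
    logging.set_verbosity('info')         # by case-insensitive name
    logging.set_verbosity(1)              # 1 is the absl DEBUG level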
  """
  try:
    new_level = int(v)
  except ValueError:
    new_level = converter.ABSL_NAMES[v.upper()]
  FLAGS.verbosity = new_level


def set_stderrthreshold(s):
  """Sets the stderr threshold to the value passed in.

  Args:
    s: str|int, valid string values are case-insensitive 'debug',
        'info', 'warning', 'error', and 'fatal'; valid integer values are
        logging.DEBUG|INFO|WARNING|ERROR|FATAL.

  Raises:
    ValueError: Raised when s is an invalid value.
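
  Example (illustrative)::

    logging.set_stderrthreshold('info')         # by name
    logging.set_stderrthreshold(logging.ERROR)  # by absl level constant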
  """
  if s in converter.ABSL_LEVELS:
    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
  elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
    FLAGS.stderrthreshold = s
  else:
    raise ValueError(
        'set_stderrthreshold only accepts integer absl logging level '
        'from -3 to 1, or case-insensitive string values '
        "'debug', 'info', 'warning', 'error', and 'fatal'. "
        'But found "{}" ({}).'.format(s, type(s)))


def fatal(msg, *args, **kwargs):
  # type: (Any, Any, Any) -> NoReturn
  """Logs a fatal message."""
  log(FATAL, msg, *args, **kwargs)


def error(msg, *args, **kwargs):
  """Logs an error message."""
  log(ERROR, msg, *args, **kwargs)


def warning(msg, *args, **kwargs):
  """Logs a warning message."""
  log(WARNING, msg, *args, **kwargs)


def warn(msg, *args, **kwargs):
  """Deprecated, use 'warning' instead."""
  warnings.warn("The 'warn' function is deprecated, use 'warning' instead",
                DeprecationWarning, 2)
  log(WARNING, msg, *args, **kwargs)


def info(msg, *args, **kwargs):
  """Logs an info message."""
  log(INFO, msg, *args, **kwargs)


def debug(msg, *args, **kwargs):
  """Logs a debug message."""
  log(DEBUG, msg, *args, **kwargs)


def exception(msg, *args, **kwargs):
  """Logs an exception, with traceback and message."""
  error(msg, *args, **kwargs, exc_info=True)


# Counter to keep track of number of log entries per token.
_log_counter_per_token = {}


def _get_next_log_count_per_token(token):
  """Wrapper for _log_counter_per_token. Thread-safe.

  Args:
    token: The token for which to look up the count.

  Returns:
    The number of times this function has been called with
    *token* as an argument (starting at 0).
  """
  # Can't use a defaultdict because defaultdict isn't atomic, whereas
  # setdefault is.
  return next(_log_counter_per_token.setdefault(token, itertools.count()))


def log_every_n(level, msg, n, *args):
  """Logs ``msg % args`` at level ``level`` once per ``n`` times.

  Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
  Not thread-safe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n: int, the number of times this should be called before it is logged.
    *args: The args to be substituted into the msg.
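
  Example (illustrative; ``i`` is any loop counter)::

    for i in range(1000):
      logging.log_every_n(logging.INFO, 'Processed %d items', 100, i)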
  """
  count = _get_next_log_count_per_token(get_absl_logger().findCaller())
  log_if(level, msg, not (count % n), *args)


# Keeps track of the last log time of the given token.
# Note: must be a dict since set/get is atomic in CPython.
# Note: entries are never released as their number is expected to be low.
_log_timer_per_token = {}


def _seconds_have_elapsed(token, num_seconds):
  """Tests if 'num_seconds' have passed since 'token' was requested.

  Not strictly thread-safe - may log with the wrong frequency if called
  concurrently from multiple threads. Accuracy depends on resolution of
  'timeit.default_timer()'.

  Always returns True on the first call for a given 'token'.

  Args:
    token: The token for which to look up the count.
    num_seconds: The number of seconds to test for.

  Returns:
    Whether it has been >= 'num_seconds' since 'token' was last requested.
  """
  now = timeit.default_timer()
  then = _log_timer_per_token.get(token, None)
  if then is None or (now - then) >= num_seconds:
    _log_timer_per_token[token] = now
    return True
  else:
    return False


def log_every_n_seconds(level, msg, n_seconds, *args):
  """Logs ``msg % args`` at level ``level`` iff ``n_seconds`` elapsed since last call.

  Logs the first call, then logs subsequent calls only if ``n_seconds`` have
  elapsed since the last logging call from the same call site (file + line).
  Not thread-safe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n_seconds: float or int, seconds which should elapse before logging again.
    *args: The args to be substituted into the msg.
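
  Example (illustrative, e.g. inside a busy loop)::

    logging.log_every_n_seconds(logging.INFO, 'Still processing...', 30)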
  """
  should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds)
  log_if(level, msg, should_log, *args)


def log_first_n(level, msg, n, *args):
  """Logs ``msg % args`` at level ``level`` only the first ``n`` times.

  Not thread-safe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n: int, the maximal number of times the message is logged.
    *args: The args to be substituted into the msg.
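
  Example (illustrative; ``path`` is just a value to interpolate)::

    logging.log_first_n(logging.WARNING, 'Deprecated config file: %s', 3, path)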
  """
  count = _get_next_log_count_per_token(get_absl_logger().findCaller())
  log_if(level, msg, count < n, *args)


def log_if(level, msg, condition, *args):
  """Logs ``msg % args`` at level ``level`` only if condition is fulfilled."""
  if condition:
    log(level, msg, *args)


def log(level, msg, *args, **kwargs):
  """Logs ``msg % args`` at absl logging level ``level``.

  If no args are given, msg is logged as-is, ignoring any interpolation
  specifiers.

  Args:
    level: int, the absl logging level at which to log the message
        (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging
        level constants are also supported, callers should prefer explicit
        logging.vlog() calls for such purpose.
    msg: str, the message to be logged.
    *args: The args to be substituted into the msg.
    **kwargs: May contain exc_info to add exception traceback to message.
  """
  if level > converter.ABSL_DEBUG:
    # Even though this function supports levels greater than 1, users
    # should use logging.vlog instead for such cases.
    # Treat this as vlog; 1 is equivalent to DEBUG.
    standard_level = converter.STANDARD_DEBUG - (level - 1)
  else:
    if level < converter.ABSL_FATAL:
      level = converter.ABSL_FATAL
    standard_level = converter.absl_to_standard(level)

  # Match standard logging's behavior. Before use_absl_handler() is called and
  # logging is configured, there is no handler attached to _absl_logger or
  # logging.root, so logs go nowhere.
  if not logging.root.handlers:
    logging.basicConfig()

  _absl_logger.log(standard_level, msg, *args, **kwargs)


def vlog(level, msg, *args, **kwargs):
  """Logs ``msg % args`` at C++ vlog level ``level``.

  Args:
    level: int, the C++ verbose logging level at which to log the message,
        e.g. 1, 2, 3, 4... While absl level constants are also supported,
        callers should prefer logging.log|debug|info|... calls for such purpose.
    msg: str, the message to be logged.
    *args: The args to be substituted into the msg.
    **kwargs: May contain exc_info to add exception traceback to message.
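
  Example (illustrative; ``stats`` stands for any value, and vlog level 2 is
  enabled when ``--verbosity`` is 2 or higher)::

    logging.vlog(2, 'Cache stats: %s', stats)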
  """
  log(level, msg, *args, **kwargs)


def vlog_is_on(level):
  """Checks if vlog is enabled for the given level in caller's source file.

  Args:
    level: int, the C++ verbose logging level at which to log the message,
        e.g. 1, 2, 3, 4... While absl level constants are also supported,
        callers should prefer level_debug|level_info|... calls for
        checking those.

  Returns:
    True if logging is turned on for that level.
  """

  if level > converter.ABSL_DEBUG:
    # Even though this function supports levels greater than 1, users
    # should use logging.vlog instead for such cases.
    # Treat this as vlog; 1 is equivalent to DEBUG.
    standard_level = converter.STANDARD_DEBUG - (level - 1)
  else:
    if level < converter.ABSL_FATAL:
      level = converter.ABSL_FATAL
    standard_level = converter.absl_to_standard(level)
  return _absl_logger.isEnabledFor(standard_level)


def flush():
  """Flushes all log files."""
  get_absl_handler().flush()


def level_debug():
  """Returns True if debug logging is turned on."""
  return get_verbosity() >= DEBUG


def level_info():
  """Returns True if info logging is turned on."""
  return get_verbosity() >= INFO


def level_warning():
  """Returns True if warning logging is turned on."""
  return get_verbosity() >= WARNING


level_warn = level_warning  # Deprecated function.


def level_error():
  """Returns True if error logging is turned on."""
  return get_verbosity() >= ERROR


def get_log_file_name(level=INFO):
  """Returns the name of the log file.

  For Python logging, only one file is used and level is ignored. Returns an
  empty string if the log is written to stderr/stdout or the log stream has
  no `name` attribute.

  Args:
    level: int, the absl.logging level.

  Raises:
    ValueError: Raised when `level` has an invalid value.
  """
  if level not in converter.ABSL_LEVELS:
    raise ValueError('Invalid absl.logging level {}'.format(level))
  stream = get_absl_handler().python_handler.stream
  if (stream == sys.stderr or stream == sys.stdout or
      not hasattr(stream, 'name')):
    return ''
  else:
    return stream.name


def find_log_dir_and_names(program_name=None, log_dir=None):
  """Computes the directory and filename prefix for the log file.

  Args:
    program_name: str|None, the filename part of the path to the program that
        is running, without its extension. E.g., if your program is called
        ``usr/bin/foobar.py``, this method should probably be called with
        ``program_name='foobar'``. However, this is just a convention; you can
        pass in any string you want, and it will be used as part of the
        log filename. If you don't pass in anything, the default behavior
        is as described in the example. In Python standard logging mode,
        the program_name will be prefixed with ``py_`` if the
        ``program_name`` argument is omitted.
    log_dir: str|None, the desired log directory.

  Returns:
    (log_dir, file_prefix, symlink_prefix)

  Raises:
    FileNotFoundError: raised in Python 3 when it cannot find a log directory.
    OSError: raised in Python 2 when it cannot find a log directory.
  """
  if not program_name:
    # Strip the extension (foobar.par becomes foobar, and
    # fubar.py becomes fubar). We do this so that the log
    # file names are similar to C++ log file names.
    program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]

    # Prepend py_ to files so that python code gets a unique file, and
    # so that C++ libraries do not try to write to the same log files as us.
    program_name = 'py_%s' % program_name

  actual_log_dir = find_log_dir(log_dir=log_dir)

  try:
    username = getpass.getuser()
  except KeyError:
    # This can happen, e.g. when running under docker w/o passwd file.
    if hasattr(os, 'getuid'):
      # Windows doesn't have os.getuid
      username = str(os.getuid())
    else:
      username = 'unknown'
  hostname = socket.gethostname()
  file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)

  return actual_log_dir, file_prefix, program_name


def find_log_dir(log_dir=None):
  """Returns the most suitable directory to put log files into.

  Args:
    log_dir: str|None, if specified, the logfile(s) will be created in that
        directory.  Otherwise if the --log_dir command-line flag is provided,
        the logfile will be created in that directory.  Otherwise the logfile
        will be created in a standard location.

  Raises:
    FileNotFoundError: raised in Python 3 when it cannot find a log directory.
    OSError: raised in Python 2 when it cannot find a log directory.
  """
  # Get a list of possible log dirs (will try to use them in order).
  if log_dir:
    # log_dir was explicitly specified as an arg, so use it and it alone.
    dirs = [log_dir]
  elif FLAGS['log_dir'].value:
    # log_dir flag was provided, so use it and it alone (this mimics the
    # behavior of the same flag in logging.cc).
    dirs = [FLAGS['log_dir'].value]
  else:
    dirs = ['/tmp/', './']

  # Find the first usable log dir.
  for d in dirs:
    if os.path.isdir(d) and os.access(d, os.W_OK):
      return d
  raise FileNotFoundError(
      "Can't find a writable directory for logs, tried %s" % dirs)


def get_absl_log_prefix(record):
  """Returns the absl log prefix for the log record.

  Args:
    record: logging.LogRecord, the record to get prefix for.
  """
  created_tuple = time.localtime(record.created)
  created_microsecond = int(record.created % 1.0 * 1e6)

  critical_prefix = ''
  level = record.levelno
  if _is_non_absl_fatal_record(record):
    # When the level is FATAL, but not logged from absl, lower the level so
    # it's treated as ERROR.
    level = logging.ERROR
    critical_prefix = _CRITICAL_PREFIX
  severity = converter.get_initial_for_level(level)

  return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
      severity,
      created_tuple.tm_mon,
      created_tuple.tm_mday,
      created_tuple.tm_hour,
      created_tuple.tm_min,
      created_tuple.tm_sec,
      created_microsecond,
      _get_thread_id(),
      record.filename,
      record.lineno,
      critical_prefix)


def skip_log_prefix(func):
  """Skips reporting the prefix of a given function or name by :class:`~absl.logging.ABSLLogger`.

  This is a convenience wrapper function / decorator for
  :meth:`~absl.logging.ABSLLogger.register_frame_to_skip`.

  If a callable function is provided, only that function will be skipped.
  If a function name is provided, all functions with the same name in the
  file that this is called in will be skipped.

  This can be used as a decorator of the intended function to be skipped.

  Args:
    func: Callable function or its name as a string.

  Returns:
    func (the input, unchanged).

  Raises:
    ValueError: The input is callable but does not have a function code object.
    TypeError: The input is neither callable nor a string.
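
  Example (illustrative; ``_log_and_raise`` is a hypothetical helper whose
  frame should not be reported as the log call site)::

    @logging.skip_log_prefix
    def _log_and_raise(msg):
      logging.error(msg)
      raise RuntimeError(msg)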
  """
  if callable(func):
    func_code = getattr(func, '__code__', None)
    if func_code is None:
      raise ValueError('Input callable does not have a function code object.')
    file_name = func_code.co_filename
    func_name = func_code.co_name
    func_lineno = func_code.co_firstlineno
  elif isinstance(func, str):
    file_name = get_absl_logger().findCaller()[0]
    func_name = func
    func_lineno = None
  else:
    raise TypeError('Input is neither callable nor a string.')
  ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno)
  return func


def _is_non_absl_fatal_record(log_record):
  return (log_record.levelno >= logging.FATAL and
          not log_record.__dict__.get(_ABSL_LOG_FATAL, False))


def _is_absl_fatal_record(log_record):
  return (log_record.levelno >= logging.FATAL and
          log_record.__dict__.get(_ABSL_LOG_FATAL, False))


# Indicates if we still need to warn about pre-init logs going to stderr.
_warn_preinit_stderr = True


class PythonHandler(logging.StreamHandler):
  """The handler class used by the Abseil Python logging implementation."""

  def __init__(self, stream=None, formatter=None):
    super(PythonHandler, self).__init__(stream)
    self.setFormatter(formatter or PythonFormatter())

  def start_logging_to_file(self, program_name=None, log_dir=None):
    """Starts logging messages to files instead of standard error."""
    FLAGS.logtostderr = False

    actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names(
        program_name=program_name, log_dir=log_dir)

    basename = '%s.INFO.%s.%d' % (
        file_prefix,
        time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())),
        os.getpid())
    filename = os.path.join(actual_log_dir, basename)

    self.stream = open(filename, 'a', encoding='utf-8')

    # os.symlink is not available on Windows Python 2.
    if getattr(os, 'symlink', None):
      # Create a symlink to the log file with a canonical name.
      symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO')
      try:
        if os.path.islink(symlink):
          os.unlink(symlink)
        os.symlink(os.path.basename(filename), symlink)
      except EnvironmentError:
        # If it fails, we're sad but it's no error.  Commonly, this
        # fails because the symlink was created by another user and so
        # we can't modify it.
        pass

  def use_absl_log_file(self, program_name=None, log_dir=None):
    """Conditionally logs to files, based on --logtostderr."""
    if FLAGS['logtostderr'].value:
      self.stream = sys.stderr
    else:
      self.start_logging_to_file(program_name=program_name, log_dir=log_dir)

  def flush(self):
    """Flushes all log files."""
    self.acquire()
    try:
      self.stream.flush()
    except (EnvironmentError, ValueError):
      # A ValueError is thrown if we try to flush a closed file.
      pass
    finally:
      self.release()

  def _log_to_stderr(self, record):
    """Emits the record to stderr.

    This temporarily sets the handler stream to stderr, calls
    StreamHandler.emit, then reverts the stream back.

    Args:
      record: logging.LogRecord, the record to log.
    """
    # emit() is protected by a lock in logging.Handler, so we don't need to
    # protect here again.
    old_stream = self.stream
    self.stream = sys.stderr
    try:
      super(PythonHandler, self).emit(record)
    finally:
      self.stream = old_stream

  def emit(self, record):
    """Prints a record out to some streams.

    1. If ``FLAGS.logtostderr`` is set, it will print to ``sys.stderr`` ONLY.
    2. If ``FLAGS.alsologtostderr`` is set, it will print to ``sys.stderr``.
    3. If ``FLAGS.logtostderr`` is not set, it will log to the stream
        associated with the current thread.

    Args:
      record: :class:`logging.LogRecord`, the record to emit.
    """
    # People occasionally call logging functions at import time, before
    # our flags may have even been defined yet, let alone parsed, as we
    # rely on the C++ side to define some flags for us and app init to
    # deal with parsing.  Match the C++ library behavior of notifying about
    # such messages and emitting them to stderr.  It encourages people to
    # clean up and does not hide the message.
    level = record.levelno
    if not FLAGS.is_parsed():  # Also implies "before flag has been defined".
      global _warn_preinit_stderr
      if _warn_preinit_stderr:
        sys.stderr.write(
            'WARNING: Logging before flag parsing goes to stderr.\n')
        _warn_preinit_stderr = False
      self._log_to_stderr(record)
    elif FLAGS['logtostderr'].value:
      self._log_to_stderr(record)
    else:
      super(PythonHandler, self).emit(record)
      stderr_threshold = converter.string_to_standard(
          FLAGS['stderrthreshold'].value)
      if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
          self.stream != sys.stderr):
        self._log_to_stderr(record)
    # Die when the record is created from ABSLLogger and level is FATAL.
    if _is_absl_fatal_record(record):
      self.flush()  # Flush the log before dying.

      # In threaded python, sys.exit() from a non-main thread only
      # exits the thread in question.
      os.abort()

  def close(self):
    """Closes the stream to which we are writing."""
    self.acquire()
    try:
      self.flush()
      try:
        # Do not close the stream if it's sys.stderr|stdout. They may be
        # redirected or overridden to files, which should be managed by users
        # explicitly.
        user_managed = sys.stderr, sys.stdout, sys.__stderr__, sys.__stdout__
        if self.stream not in user_managed and (
            not hasattr(self.stream, 'isatty') or not self.stream.isatty()):
          self.stream.close()
      except ValueError:
        # A ValueError is thrown if we try to run isatty() on a closed file.
        pass
      super(PythonHandler, self).close()
    finally:
      self.release()


class ABSLHandler(logging.Handler):
  """Abseil Python logging module's log handler."""

  def __init__(self, python_logging_formatter):
    super(ABSLHandler, self).__init__()

    self._python_handler = PythonHandler(formatter=python_logging_formatter)
    self.activate_python_handler()

  def format(self, record):
    return self._current_handler.format(record)

  def setFormatter(self, fmt):
    self._current_handler.setFormatter(fmt)

  def emit(self, record):
    self._current_handler.emit(record)

  def flush(self):
    self._current_handler.flush()

  def close(self):
    super(ABSLHandler, self).close()
    self._current_handler.close()

  def handle(self, record):
    rv = self.filter(record)
    if rv:
      return self._current_handler.handle(record)
    return rv

  @property
  def python_handler(self):
    return self._python_handler

  def activate_python_handler(self):
    """Uses the Python logging handler as the current logging handler."""
    self._current_handler = self._python_handler

  def use_absl_log_file(self, program_name=None, log_dir=None):
    self._current_handler.use_absl_log_file(program_name, log_dir)

  def start_logging_to_file(self, program_name=None, log_dir=None):
    self._current_handler.start_logging_to_file(program_name, log_dir)


class PythonFormatter(logging.Formatter):
  """Formatter class used by :class:`~absl.logging.PythonHandler`."""

  def format(self, record):
    """Appends the message from the record to the results of the prefix.

    Args:
      record: logging.LogRecord, the record to be formatted.

    Returns:
      The formatted string representing the record.
    """
    if (not FLAGS['showprefixforinfo'].value and
        FLAGS['verbosity'].value == converter.ABSL_INFO and
        record.levelno == logging.INFO and
        _absl_handler.python_handler.stream == sys.stderr):
      prefix = ''
    else:
      prefix = get_absl_log_prefix(record)
    return prefix + super(PythonFormatter, self).format(record)


class ABSLLogger(logging.getLoggerClass()):
  """A logger that will create LogRecords while skipping some stack frames.

  This class maintains an internal list of filenames and method names
  for use when determining who called the currently executing stack
  frame.  Any method names from specific source files are skipped when
  walking backwards through the stack.

  Client code should use the register_frame_to_skip method to let the
  ABSLLogger know which method from which file should be
  excluded from the walk backwards through the stack.
  """
  _frames_to_skip = set()

  def findCaller(self, stack_info=False, stacklevel=1):
    """Finds the frame of the calling method on the stack.

    This method skips any frames registered with the
    ABSLLogger and any methods from this file, and whatever
    method is currently being used to generate the prefix for the log
    line.  Then it returns the file name, line number, and method name
    of the calling method.  An optional fourth item may be returned;
    callers who only need the first three are advised to
    always slice or index the result rather than using direct unpacking
    assignment.

    Args:
      stack_info: bool, when True, include the stack trace as a fourth item
          returned.  On Python 3 there are always four items returned - the
          fourth will be None when this is False.  On Python 2 the stdlib
          base class API only returns three items.  We do the same when this
          new parameter is unspecified or False for compatibility.
      stacklevel: int, ignored by this implementation; accepted for
          compatibility with the standard library ``findCaller`` signature.

    Returns:
      (filename, lineno, methodname[, sinfo]) of the calling method.
    """
    f_to_skip = ABSLLogger._frames_to_skip
    # Use sys._getframe(2) instead of logging.currentframe(); it's slightly
    # faster because there is one less frame to traverse.
    frame = sys._getframe(2)  # pylint: disable=protected-access

    while frame:
      code = frame.f_code
      if (_LOGGING_FILE_PREFIX not in code.co_filename and
          (code.co_filename, code.co_name,
           code.co_firstlineno) not in f_to_skip and
          (code.co_filename, code.co_name) not in f_to_skip):
        sinfo = None
        if stack_info:
          out = io.StringIO()
          out.write(u'Stack (most recent call last):\n')
          traceback.print_stack(frame, file=out)
          sinfo = out.getvalue().rstrip(u'\n')
        return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
      frame = frame.f_back

  def critical(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``CRITICAL``."""
    self.log(logging.CRITICAL, msg, *args, **kwargs)

  def fatal(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``FATAL``."""
    self.log(logging.FATAL, msg, *args, **kwargs)

  def error(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``ERROR``."""
    self.log(logging.ERROR, msg, *args, **kwargs)

  def warn(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``WARN``."""
    warnings.warn("The 'warn' method is deprecated, use 'warning' instead",
                  DeprecationWarning, 2)
    self.log(logging.WARN, msg, *args, **kwargs)

  def warning(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``WARNING``."""
    self.log(logging.WARNING, msg, *args, **kwargs)

  def info(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``INFO``."""
    self.log(logging.INFO, msg, *args, **kwargs)

  def debug(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``DEBUG``."""
    self.log(logging.DEBUG, msg, *args, **kwargs)

  def log(self, level, msg, *args, **kwargs):
    """Logs a message at a certain level, substituting in the supplied arguments.

    This method behaves differently in Python and C++ modes.

    Args:
      level: int, the standard logging level at which to log the message.
      msg: str, the text of the message to log.
      *args: The arguments to substitute in the message.
      **kwargs: The keyword arguments to substitute in the message.
    """
    if level >= logging.FATAL:
      # Add property to the LogRecord created by this logger.
      # This will be used by the ABSLHandler to determine whether it should
      # treat CRITICAL/FATAL logs as really FATAL.
      extra = kwargs.setdefault('extra', {})
      extra[_ABSL_LOG_FATAL] = True
    super(ABSLLogger, self).log(level, msg, *args, **kwargs)

  def handle(self, record):
    """Calls handlers without checking ``Logger.disabled``.

    Non-root loggers are set to disabled after setup with :func:`logging.config`
    if it's not explicitly specified. Historically, absl logging is not
    disabled by that. To maintain this behavior, this method skips
    checking the ``Logger.disabled`` bit.

    This logger can still be disabled by adding a filter that filters out
    everything.

    Args:
      record: logging.LogRecord, the record to handle.
    """
    if self.filter(record):
      self.callHandlers(record)

  @classmethod
  def register_frame_to_skip(cls, file_name, function_name, line_number=None):
    """Registers a function name to skip when walking the stack.

    The :class:`~absl.logging.ABSLLogger` sometimes skips method calls on the
    stack to make the log messages meaningful in their appropriate context.
    This method registers a function from a particular file as one
    which should be skipped.

    Args:
      file_name: str, the name of the file that contains the function.
      function_name: str, the name of the function to skip.
      line_number: int, if provided, only the function with this starting line
          number will be skipped. Otherwise, all functions with the same name
          in the file will be skipped.
    """
    if line_number is not None:
      cls._frames_to_skip.add((file_name, function_name, line_number))
    else:
      cls._frames_to_skip.add((file_name, function_name))


def _get_thread_id():
  """Gets the id of the current thread, suitable for logging as an unsigned quantity.

  If pywrapbase is linked, returns GetTID() for the thread ID to be
  consistent with C++ logging.  Otherwise, returns the numeric thread id.
  The quantities are made unsigned by masking with ``_THREAD_ID_MASK``.

  Returns:
    Thread ID unique to this process (unsigned)
  """
  thread_id = threading.get_ident()
  return thread_id & _THREAD_ID_MASK


def get_absl_logger():
  """Returns the absl logger instance."""
  return _absl_logger


def get_absl_handler():
  """Returns the absl handler instance."""
  return _absl_handler


def use_python_logging(quiet=False):
  """Uses the Python implementation of the logging code.

  Args:
    quiet: bool, if True, do not log a message about switching logging type.
  """
  get_absl_handler().activate_python_handler()
  if not quiet:
    info('Restoring pure python logging')


_attempted_to_remove_stderr_stream_handlers = False


def use_absl_handler():
  """Uses the ABSL logging handler for logging.

  This method is called in :func:`app.run()<absl.app.run>` so the absl handler
  is used in absl apps.
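
  Example (illustrative; for programs that do not go through ``app.run()``,
  parse flags first and then install the handler)::

    flags.FLAGS(sys.argv)  # parse flags before installing the handler
    logging.use_absl_handler()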
  """
  global _attempted_to_remove_stderr_stream_handlers
  if not _attempted_to_remove_stderr_stream_handlers:
    # The absl handler logs to stderr by default. To prevent double logging to
    # stderr, the following code tries its best to remove other handlers that
    # emit to stderr. Those handlers are most commonly added when
    # logging.info/debug is called before calling use_absl_handler().
    handlers = [
        h for h in logging.root.handlers
        if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
    for h in handlers:
      logging.root.removeHandler(h)
    _attempted_to_remove_stderr_stream_handlers = True

  absl_handler = get_absl_handler()
  if absl_handler not in logging.root.handlers:
    logging.root.addHandler(absl_handler)
    FLAGS['verbosity']._update_logging_levels()  # pylint: disable=protected-access
    FLAGS['logger_levels']._update_logger_levels()  # pylint: disable=protected-access


def _initialize():
  """Initializes loggers and handlers."""
  global _absl_logger, _absl_handler

  if _absl_logger:
    return

  original_logger_class = logging.getLoggerClass()
  logging.setLoggerClass(ABSLLogger)
  _absl_logger = logging.getLogger('absl')
  logging.setLoggerClass(original_logger_class)

  python_logging_formatter = PythonFormatter()
  _absl_handler = ABSLHandler(python_logging_formatter)


_initialize()