# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Abseil Python logging module implemented on top of standard logging.

Simple usage::

    from absl import logging

    logging.info('Interesting Stuff')
    logging.info('Interesting Stuff with Arguments: %d', 42)

    logging.set_verbosity(logging.INFO)
    logging.log(logging.DEBUG, 'This will *not* be printed')
    logging.set_verbosity(logging.DEBUG)
    logging.log(logging.DEBUG, 'This will be printed')

    logging.warning('Worrying Stuff')
    logging.error('Alarming Stuff')
    logging.fatal('AAAAHHHHH!!!!')  # Process exits.

Usage note: Do not pre-format the strings in your program code.
Instead, let the logging module perform argument interpolation.
This saves cycles because strings that don't need to be printed
are never formatted.  Note that this module does not attempt to
interpolate arguments when no arguments are given.  In other words::

    logging.info('Interesting Stuff: %s')

does not raise an exception because logging.info() has only one
argument, the message string.

"Lazy" evaluation for debugging
-------------------------------

If you do something like this::

    logging.debug('Thing: %s', thing.ExpensiveOp())

then the ExpensiveOp will be evaluated even if nothing
is printed to the log. To avoid this, use the level_debug() function::

    if logging.level_debug():
      logging.debug('Thing: %s', thing.ExpensiveOp())

Per file level logging is supported by logging.vlog() and
logging.vlog_is_on(). For example::

    if logging.vlog_is_on(2):
      logging.vlog(2, very_expensive_debug_message())

Notes on Unicode
----------------

The log output is encoded as UTF-8.  Don't pass data in other encodings in
bytes() instances -- instead pass unicode string instances when you need to
(for both the format string and arguments).
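For example (a brief illustration; the non-ASCII literal is just a stand-in
for any text)::

    logging.info('Hello, %s', u'wörld')   # pass str, not pre-encoded bytes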

Note on critical and fatal:
The standard logging module defines fatal as an alias to critical, but it's not
documented, and it does NOT actually terminate the program.
This module only defines fatal but not critical, and it DOES terminate the
program.

The differences in behavior are historical and unfortunate.
"""

import collections
from collections import abc
import getpass
import io
import itertools
import logging
import os
import socket
import struct
import sys
import tempfile
import threading
import time
import timeit
import traceback
import types
import warnings

from absl import flags
from absl.logging import converter

try:
  from typing import NoReturn
except ImportError:
  pass


FLAGS = flags.FLAGS


# Logging levels.
FATAL = converter.ABSL_FATAL
ERROR = converter.ABSL_ERROR
WARNING = converter.ABSL_WARNING
WARN = converter.ABSL_WARNING  # Deprecated name.
INFO = converter.ABSL_INFO
DEBUG = converter.ABSL_DEBUG

# Regex to match/parse log line prefixes.
ABSL_LOGGING_PREFIX_REGEX = (
    r'^(?P<severity>[IWEF])'
    r'(?P<month>\d\d)(?P<day>\d\d) '
    r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)'
    r'\.(?P<microsecond>\d\d\d\d\d\d) +'
    r'(?P<thread_id>-?\d+) '
    r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')


# Mask to convert integer thread ids to unsigned quantities for logging purposes
_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1

# Extra property set on the LogRecord created by ABSLLogger when its level is
# CRITICAL/FATAL.
_ABSL_LOG_FATAL = '_absl_log_fatal'
# Extra prefix added to the log message when a non-absl logger logs a
# CRITICAL/FATAL message.
_CRITICAL_PREFIX = 'CRITICAL - '

# Used by findCaller to skip callers from */logging/__init__.py.
_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')

# The ABSL logger instance, initialized in _initialize().
_absl_logger = None
# The ABSL handler instance, initialized in _initialize().
_absl_handler = None


_CPP_NAME_TO_LEVELS = {
    'debug': '0',  # Abseil C++ has no DEBUG level, mapping it to INFO here.
    'info': '0',
    'warning': '1',
    'warn': '1',
    'error': '2',
    'fatal': '3'
}

_CPP_LEVEL_TO_NAMES = {
    '0': 'info',
    '1': 'warning',
    '2': 'error',
    '3': 'fatal',
}


class _VerbosityFlag(flags.Flag):
  """Flag class for -v/--verbosity."""

  def __init__(self, *args, **kwargs):
    super(_VerbosityFlag, self).__init__(
        flags.IntegerParser(),
        flags.ArgumentSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, v):
    self._value = v
    self._update_logging_levels()

  def _update_logging_levels(self):
    """Updates absl logging levels to the current verbosity.

    Visibility: module-private
    """
    if not _absl_logger:
      return

    if self._value <= converter.ABSL_DEBUG:
      standard_verbosity = converter.absl_to_standard(self._value)
    else:
      # --verbosity is set to higher than 1 for vlog.
      standard_verbosity = logging.DEBUG - (self._value - 1)

    # Also update root level when absl_handler is used.
    if _absl_handler in logging.root.handlers:
      # Make absl logger inherit from the root logger. absl logger might have
      # a non-NOTSET value if logging.set_verbosity() is called at import time.
      _absl_logger.setLevel(logging.NOTSET)
      logging.root.setLevel(standard_verbosity)
    else:
      _absl_logger.setLevel(standard_verbosity)


class _LoggerLevelsFlag(flags.Flag):
  """Flag class for --logger_levels."""

  def __init__(self, *args, **kwargs):
    super(_LoggerLevelsFlag, self).__init__(
        _LoggerLevelsParser(),
        _LoggerLevelsSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    # For lack of an immutable type, be defensive and return a copy.
    # Modifications to the dict aren't supported and won't have any effect.
    # While Py3 could use MappingProxyType, that isn't deepcopy friendly, so
    # just return a copy.
    return self._value.copy()

  @value.setter
  def value(self, v):
    self._value = {} if v is None else v
    self._update_logger_levels()

  def _update_logger_levels(self):
    # Visibility: module-private.
    # This is called by absl.app.run() during initialization.
    for name, level in self._value.items():
      logging.getLogger(name).setLevel(level)


class _LoggerLevelsParser(flags.ArgumentParser):
  """Parser for --logger_levels flag."""

  def parse(self, value):
    if isinstance(value, abc.Mapping):
      return value

    pairs = [pair.strip() for pair in value.split(',') if pair.strip()]

    # Preserve the order so that serialization is deterministic.
    levels = collections.OrderedDict()
    for name_level in pairs:
      name, level = name_level.split(':', 1)
      name = name.strip()
      level = level.strip()
      levels[name] = level
    return levels


class _LoggerLevelsSerializer(object):
  """Serializer for --logger_levels flag."""

  def serialize(self, value):
    if isinstance(value, str):
      return value
    return ','.join(
        '{}:{}'.format(name, level) for name, level in value.items())


class _StderrthresholdFlag(flags.Flag):
  """Flag class for --stderrthreshold."""

  def __init__(self, *args, **kwargs):
    super(_StderrthresholdFlag, self).__init__(
        flags.ArgumentParser(),
        flags.ArgumentSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, v):
    if v in _CPP_LEVEL_TO_NAMES:
      # --stderrthreshold also accepts numeric strings whose values are
      # Abseil C++ log levels.
      cpp_value = int(v)
      v = _CPP_LEVEL_TO_NAMES[v]  # Normalize to strings.
    elif v.lower() in _CPP_NAME_TO_LEVELS:
      v = v.lower()
      if v == 'warn':
        v = 'warning'  # Use 'warning' as the canonical name.
      cpp_value = int(_CPP_NAME_TO_LEVELS[v])
    else:
      raise ValueError(
          '--stderrthreshold must be one of (case-insensitive) '
          "'debug', 'info', 'warning', 'error', 'fatal', "
          "or '0', '1', '2', '3', not '%s'" % v)

    self._value = v


flags.DEFINE_boolean('logtostderr',
                     False,
                     'Should only log to stderr?', allow_override_cpp=True)
flags.DEFINE_boolean('alsologtostderr',
                     False,
                     'also log to stderr?', allow_override_cpp=True)
flags.DEFINE_string('log_dir',
                    os.getenv('TEST_TMPDIR', ''),
                    'directory to write logfiles into',
                    allow_override_cpp=True)
flags.DEFINE_flag(_VerbosityFlag(
    'verbosity', -1,
    'Logging verbosity level. Messages logged at this level or lower will '
    'be included. Set to 1 for debug logging. If the flag was not set or '
    'supplied, the value will be changed from the default of -1 (warning) to '
    '0 (info) after flags are parsed.',
    short_name='v', allow_hide_cpp=True))
flags.DEFINE_flag(
    _LoggerLevelsFlag(
        'logger_levels', {},
        'Specify log level of loggers. The format is a CSV list of '
        '`name:level`. Where `name` is the logger name used with '
        '`logging.getLogger()`, and `level` is a level name (INFO, DEBUG, '
        'etc). e.g. `myapp.foo:INFO,other.logger:DEBUG`'))
flags.DEFINE_flag(_StderrthresholdFlag(
    'stderrthreshold', 'fatal',
    'log messages at this level, or more severe, to stderr in '
    'addition to the logfile.  Possible values are '
    "'debug', 'info', 'warning', 'error', and 'fatal'.  "
    'Obsoletes --alsologtostderr. Using --alsologtostderr '
    'cancels the effect of this flag. Please also note that '
    'this flag is subject to --verbosity and requires logfile '
    'not be stderr.', allow_hide_cpp=True))
flags.DEFINE_boolean('showprefixforinfo', True,
                     'If False, do not prepend prefix to info messages '
                     'when it\'s logged to stderr, '
                     '--verbosity is set to INFO level, '
                     'and python logging is used.')


def get_verbosity():
  """Returns the logging verbosity."""
  return FLAGS['verbosity'].value


def set_verbosity(v):
  """Sets the logging verbosity.

  Causes all messages of level <= v to be logged,
  and all messages of level > v to be silently discarded.

  Args:
    v: int|str, the verbosity level as an integer or string. Legal string values
        are those that can be coerced to an integer as well as case-insensitive
        'debug', 'info', 'warning', 'error', and 'fatal'.
  """
  try:
    new_level = int(v)
  except ValueError:
    new_level = converter.ABSL_NAMES[v.upper()]
  FLAGS.verbosity = new_level


def set_stderrthreshold(s):
  """Sets the stderr threshold to the value passed in.

  Args:
    s: str|int, valid string values are case-insensitive 'debug',
        'info', 'warning', 'error', and 'fatal'; valid integer values are
        logging.DEBUG|INFO|WARNING|ERROR|FATAL.

  Raises:
      ValueError: Raised when s is an invalid value.
  """
  if s in converter.ABSL_LEVELS:
    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
  elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
    FLAGS.stderrthreshold = s
  else:
    raise ValueError(
        'set_stderrthreshold only accepts integer absl logging level '
        'from -3 to 1, or case-insensitive string values '
        "'debug', 'info', 'warning', 'error', and 'fatal'. "
        'But found "{}" ({}).'.format(s, type(s)))


def fatal(msg, *args, **kwargs):
  # type: (Any, Any, Any) -> NoReturn
  """Logs a fatal message."""
  log(FATAL, msg, *args, **kwargs)


def error(msg, *args, **kwargs):
  """Logs an error message."""
  log(ERROR, msg, *args, **kwargs)


def warning(msg, *args, **kwargs):
  """Logs a warning message."""
  log(WARNING, msg, *args, **kwargs)


def warn(msg, *args, **kwargs):
  """Deprecated, use 'warning' instead."""
  warnings.warn("The 'warn' function is deprecated, use 'warning' instead",
                DeprecationWarning, 2)
  log(WARNING, msg, *args, **kwargs)


def info(msg, *args, **kwargs):
  """Logs an info message."""
  log(INFO, msg, *args, **kwargs)


def debug(msg, *args, **kwargs):
  """Logs a debug message."""
  log(DEBUG, msg, *args, **kwargs)


def exception(msg, *args, **kwargs):
  """Logs an exception, with traceback and message."""
  error(msg, *args, **kwargs, exc_info=True)

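# A brief, illustrative sketch of exception() (assumes client code did
# ``from absl import logging``; do_something() is just a placeholder): call it
# from within an except block so the active exception's traceback is appended
# to the message.
#
#   try:
#     do_something()
#   except OSError:
#     logging.exception('do_something failed')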

# Counter to keep track of number of log entries per token.
_log_counter_per_token = {}


def _get_next_log_count_per_token(token):
  """Wrapper for _log_counter_per_token. Thread-safe.

  Args:
    token: The token for which to look up the count.

  Returns:
    The number of times this function has been called with
    *token* as an argument (starting at 0).
  """
  # Can't use a defaultdict because defaultdict isn't atomic, whereas
  # setdefault is.
  return next(_log_counter_per_token.setdefault(token, itertools.count()))


def log_every_n(level, msg, n, *args):
  """Logs ``msg % args`` at level 'level' once per 'n' times.

  Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
  Not threadsafe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n: int, the number of times this should be called before it is logged.
    *args: The args to be substituted into the msg.
  """
  count = _get_next_log_count_per_token(get_absl_logger().findCaller())
  log_if(level, msg, not (count % n), *args)

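# A brief, illustrative sketch of log_every_n() (assumes ``from absl import
# logging`` in client code; items and process() are placeholders): report
# progress on every 100th iteration without flooding the log.
#
#   for i, item in enumerate(items):
#     logging.log_every_n(logging.INFO, 'Processed %d items so far', 100, i)
#     process(item)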

# Keeps track of the last log time of the given token.
# Note: must be a dict since set/get is atomic in CPython.
# Note: entries are never released as their number is expected to be low.
_log_timer_per_token = {}


def _seconds_have_elapsed(token, num_seconds):
  """Tests if 'num_seconds' have passed since 'token' was requested.

  Not strictly thread-safe - may log with the wrong frequency if called
  concurrently from multiple threads. Accuracy depends on resolution of
  'timeit.default_timer()'.

  Always returns True on the first call for a given 'token'.

  Args:
    token: The token for which to look up the count.
    num_seconds: The number of seconds to test for.

  Returns:
    Whether it has been >= 'num_seconds' since 'token' was last requested.
  """
  now = timeit.default_timer()
  then = _log_timer_per_token.get(token, None)
  if then is None or (now - then) >= num_seconds:
    _log_timer_per_token[token] = now
    return True
  else:
    return False


def log_every_n_seconds(level, msg, n_seconds, *args):
  """Logs ``msg % args`` at level ``level`` iff ``n_seconds`` elapsed since last call.

  Logs the first call, logs subsequent calls if 'n' seconds have elapsed since
  the last logging call from the same call site (file + line). Not thread-safe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n_seconds: float or int, seconds which should elapse before logging again.
    *args: The args to be substituted into the msg.
  """
  should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds)
  log_if(level, msg, should_log, *args)

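# A brief, illustrative sketch of log_every_n_seconds() (assumes ``from absl
# import logging`` in client code; poll_queue() is a placeholder): emit a
# heartbeat at most once every 10 seconds from a busy loop.
#
#   while True:
#     logging.log_every_n_seconds(logging.INFO, 'Still polling...', 10)
#     poll_queue()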

def log_first_n(level, msg, n, *args):
  """Logs ``msg % args`` at level ``level`` only first ``n`` times.

  Not threadsafe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n: int, the maximal number of times the message is logged.
    *args: The args to be substituted into the msg.
  """
  count = _get_next_log_count_per_token(get_absl_logger().findCaller())
  log_if(level, msg, count < n, *args)


def log_if(level, msg, condition, *args):
  """Logs ``msg % args`` at level ``level`` only if condition is fulfilled."""
  if condition:
    log(level, msg, *args)


def log(level, msg, *args, **kwargs):
  """Logs ``msg % args`` at absl logging level ``level``.

  If no args are given, just print msg, ignoring any interpolation specifiers.

  Args:
    level: int, the absl logging level at which to log the message
        (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging
        level constants are also supported, callers should prefer explicit
        logging.vlog() calls for such purpose.
    msg: str, the message to be logged.
    *args: The args to be substituted into the msg.
    **kwargs: May contain exc_info to add exception traceback to message.
  """
  if level > converter.ABSL_DEBUG:
    # Even though this function supports levels greater than 1, users
    # should use logging.vlog instead for such cases.
    # Treat this as vlog; 1 is equivalent to DEBUG.
    standard_level = converter.STANDARD_DEBUG - (level - 1)
  else:
    if level < converter.ABSL_FATAL:
      level = converter.ABSL_FATAL
    standard_level = converter.absl_to_standard(level)

  # Match standard logging's behavior. Before use_absl_handler() is called and
  # logging is configured, there is no handler attached to _absl_logger or
  # logging.root, so logs would go nowhere.
  if not logging.root.handlers:
    logging.basicConfig()

  _absl_logger.log(standard_level, msg, *args, **kwargs)


def vlog(level, msg, *args, **kwargs):
  """Log ``msg % args`` at C++ vlog level ``level``.

  Args:
    level: int, the C++ verbose logging level at which to log the message,
        e.g. 1, 2, 3, 4... While absl level constants are also supported,
        callers should prefer logging.log|debug|info|... calls for such purpose.
    msg: str, the message to be logged.
    *args: The args to be substituted into the msg.
    **kwargs: May contain exc_info to add exception traceback to message.
  """
  log(level, msg, *args, **kwargs)


def vlog_is_on(level):
  """Checks if vlog is enabled for the given level in caller's source file.

  Args:
    level: int, the C++ verbose logging level at which to log the message,
        e.g. 1, 2, 3, 4... While absl level constants are also supported,
        callers should prefer level_debug|level_info|... calls for
        checking those.

  Returns:
    True if logging is turned on for that level.
  """

  if level > converter.ABSL_DEBUG:
    # Even though this function supports levels greater than 1, users
    # should use logging.vlog instead for such cases.
    # Treat this as vlog; 1 is equivalent to DEBUG.
    standard_level = converter.STANDARD_DEBUG - (level - 1)
  else:
    if level < converter.ABSL_FATAL:
      level = converter.ABSL_FATAL
    standard_level = converter.absl_to_standard(level)
  return _absl_logger.isEnabledFor(standard_level)


def flush():
  """Flushes all log files."""
  get_absl_handler().flush()


def level_debug():
  """Returns True if debug logging is turned on."""
  return get_verbosity() >= DEBUG


def level_info():
  """Returns True if info logging is turned on."""
  return get_verbosity() >= INFO


def level_warning():
  """Returns True if warning logging is turned on."""
  return get_verbosity() >= WARNING


level_warn = level_warning  # Deprecated function.


def level_error():
  """Returns True if error logging is turned on."""
  return get_verbosity() >= ERROR


def get_log_file_name(level=INFO):
  """Returns the name of the log file.

  For Python logging, only one file is used and level is ignored. It returns an
  empty string if logging goes to stderr/stdout or the log stream has no `name`
  attribute.

  Args:
    level: int, the absl.logging level.

  Raises:
    ValueError: Raised when `level` has an invalid value.
  """
  if level not in converter.ABSL_LEVELS:
    raise ValueError('Invalid absl.logging level {}'.format(level))
  stream = get_absl_handler().python_handler.stream
  if (stream == sys.stderr or stream == sys.stdout or
      not hasattr(stream, 'name')):
    return ''
  else:
    return stream.name

def find_log_dir_and_names(program_name=None, log_dir=None):
  """Computes the directory and filename prefix for log file.

  Args:
    program_name: str|None, the filename part of the path to the program that
        is running without its extension.  e.g.: if your program is called
        ``usr/bin/foobar.py`` this method should probably be called with
        ``program_name='foobar'``. However, this is just a convention; you can
        pass in any string you want, and it will be used as part of the
        log filename. If you don't pass in anything, the default behavior
        is as described in the example.  In python standard logging mode,
        the program_name will be prepended with ``py_`` if the
        ``program_name`` argument is omitted.
    log_dir: str|None, the desired log directory.

  Returns:
    (log_dir, file_prefix, symlink_prefix)

  Raises:
    FileNotFoundError: raised in Python 3 when it cannot find a log directory.
    OSError: raised in Python 2 when it cannot find a log directory.
  """
  if not program_name:
    # Strip the extension (foobar.par becomes foobar, and
    # fubar.py becomes fubar). We do this so that the log
    # file names are similar to C++ log file names.
    program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]

    # Prepend py_ to files so that python code gets a unique file, and
    # so that C++ libraries do not try to write to the same log files as us.
    program_name = 'py_%s' % program_name

  actual_log_dir = find_log_dir(log_dir=log_dir)

  try:
    username = getpass.getuser()
  except KeyError:
    # This can happen, e.g. when running under docker w/o passwd file.
    if hasattr(os, 'getuid'):
      # Windows doesn't have os.getuid
      username = str(os.getuid())
    else:
      username = 'unknown'
  hostname = socket.gethostname()
  file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)

  return actual_log_dir, file_prefix, program_name


def find_log_dir(log_dir=None):
  """Returns the most suitable directory to put log files into.

  Args:
    log_dir: str|None, if specified, the logfile(s) will be created in that
        directory.  Otherwise if the --log_dir command-line flag is provided,
        the logfile will be created in that directory.  Otherwise the logfile
        will be created in a standard location.

  Raises:
    FileNotFoundError: raised in Python 3 when it cannot find a log directory.
    OSError: raised in Python 2 when it cannot find a log directory.
  """
  # Get a list of possible log dirs (will try to use them in order).
  # NOTE: Google's internal implementation has a special handling for Google
  # machines, which uses a list of directories. Hence the following uses `dirs`
  # instead of a single directory.
  if log_dir:
    # log_dir was explicitly specified as an arg, so use it and it alone.
    dirs = [log_dir]
  elif FLAGS['log_dir'].value:
    # log_dir flag was provided, so use it and it alone (this mimics the
    # behavior of the same flag in logging.cc).
    dirs = [FLAGS['log_dir'].value]
  else:
    dirs = [tempfile.gettempdir()]

  # Find the first usable log dir.
  for d in dirs:
    if os.path.isdir(d) and os.access(d, os.W_OK):
      return d
  raise FileNotFoundError(
      "Can't find a writable directory for logs, tried %s" % dirs)


def get_absl_log_prefix(record):
  """Returns the absl log prefix for the log record.

  Args:
    record: logging.LogRecord, the record to get prefix for.
  """
  created_tuple = time.localtime(record.created)
  created_microsecond = int(record.created % 1.0 * 1e6)

  critical_prefix = ''
  level = record.levelno
  if _is_non_absl_fatal_record(record):
    # When the level is FATAL, but not logged from absl, lower the level so
    # it's treated as ERROR.
    level = logging.ERROR
    critical_prefix = _CRITICAL_PREFIX
  severity = converter.get_initial_for_level(level)

  return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
      severity,
      created_tuple.tm_mon,
      created_tuple.tm_mday,
      created_tuple.tm_hour,
      created_tuple.tm_min,
      created_tuple.tm_sec,
      created_microsecond,
      _get_thread_id(),
      record.filename,
      record.lineno,
      critical_prefix)


def skip_log_prefix(func):
  """Skips reporting the prefix of a given function or name by :class:`~absl.logging.ABSLLogger`.

  This is a convenience wrapper function / decorator for
  :meth:`~absl.logging.ABSLLogger.register_frame_to_skip`.

  If a callable function is provided, only that function will be skipped.
  If a function name is provided, all functions with the same name in the
  file that this is called in will be skipped.

  This can be used as a decorator of the intended function to be skipped.

  Args:
    func: Callable function or its name as a string.

  Returns:
    func (the input, unchanged).

  Raises:
    ValueError: The input is callable but does not have a function code object.
    TypeError: The input is neither callable nor a string.
  """
  if callable(func):
    func_code = getattr(func, '__code__', None)
    if func_code is None:
      raise ValueError('Input callable does not have a function code object.')
    file_name = func_code.co_filename
    func_name = func_code.co_name
    func_lineno = func_code.co_firstlineno
  elif isinstance(func, str):
    file_name = get_absl_logger().findCaller()[0]
    func_name = func
    func_lineno = None
  else:
    raise TypeError('Input is neither callable nor a string.')
  ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno)
  return func

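# A brief, illustrative sketch of skip_log_prefix() used as a decorator
# (assumes ``from absl import logging`` in client code; warn_once() is a
# made-up helper): log lines emitted from inside the helper are attributed to
# its caller rather than to the helper itself.
#
#   @logging.skip_log_prefix
#   def warn_once(msg):
#     logging.warning('deprecated: %s', msg)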

def _is_non_absl_fatal_record(log_record):
  return (log_record.levelno >= logging.FATAL and
          not log_record.__dict__.get(_ABSL_LOG_FATAL, False))


def _is_absl_fatal_record(log_record):
  return (log_record.levelno >= logging.FATAL and
          log_record.__dict__.get(_ABSL_LOG_FATAL, False))


# Indicates if we still need to warn about pre-init logs going to stderr.
_warn_preinit_stderr = True


class PythonHandler(logging.StreamHandler):
  """The handler class used by Abseil Python logging implementation."""

  def __init__(self, stream=None, formatter=None):
    super(PythonHandler, self).__init__(stream)
    self.setFormatter(formatter or PythonFormatter())

  def start_logging_to_file(self, program_name=None, log_dir=None):
    """Starts logging messages to files instead of standard error."""
    FLAGS.logtostderr = False

    actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names(
        program_name=program_name, log_dir=log_dir)

    basename = '%s.INFO.%s.%d' % (
        file_prefix,
        time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())),
        os.getpid())
    filename = os.path.join(actual_log_dir, basename)

    self.stream = open(filename, 'a', encoding='utf-8')

    # os.symlink is not available on Windows Python 2.
    if getattr(os, 'symlink', None):
      # Create a symlink to the log file with a canonical name.
      symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO')
      try:
        if os.path.islink(symlink):
          os.unlink(symlink)
        os.symlink(os.path.basename(filename), symlink)
      except EnvironmentError:
        # If it fails, we're sad but it's no error.  Commonly, this
        # fails because the symlink was created by another user and so
        # we can't modify it.
        pass

  def use_absl_log_file(self, program_name=None, log_dir=None):
    """Conditionally logs to files, based on --logtostderr."""
    if FLAGS['logtostderr'].value:
      self.stream = sys.stderr
    else:
      self.start_logging_to_file(program_name=program_name, log_dir=log_dir)

  def flush(self):
    """Flushes all log files."""
    self.acquire()
    try:
      self.stream.flush()
    except (EnvironmentError, ValueError):
      # A ValueError is thrown if we try to flush a closed file.
      pass
    finally:
      self.release()

  def _log_to_stderr(self, record):
    """Emits the record to stderr.

    This temporarily sets the handler stream to stderr, calls
    StreamHandler.emit, then reverts the stream back.

    Args:
      record: logging.LogRecord, the record to log.
    """
    # emit() is protected by a lock in logging.Handler, so we don't need to
    # protect here again.
    old_stream = self.stream
    self.stream = sys.stderr
    try:
      super(PythonHandler, self).emit(record)
    finally:
      self.stream = old_stream

  def emit(self, record):
    """Prints a record out to some streams.

    1. If ``FLAGS.logtostderr`` is set, it will print to ``sys.stderr`` ONLY.
    2. If ``FLAGS.alsologtostderr`` is set, it will print to ``sys.stderr``.
    3. If ``FLAGS.logtostderr`` is not set, it will log to the stream
        associated with the current thread.

    Args:
      record: :class:`logging.LogRecord`, the record to emit.
    """
    # People occasionally call logging functions at import time before
    # our flags may have even been defined yet, let alone even parsed, as we
    # rely on the C++ side to define some flags for us and app init to
    # deal with parsing.  Match the C++ library behavior by notifying the user
    # and emitting such messages to stderr.  It encourages people to clean up
    # such calls and does not hide the message.
    level = record.levelno
    if not FLAGS.is_parsed():  # Also implies "before flag has been defined".
      global _warn_preinit_stderr
      if _warn_preinit_stderr:
        sys.stderr.write(
            'WARNING: Logging before flag parsing goes to stderr.\n')
        _warn_preinit_stderr = False
      self._log_to_stderr(record)
    elif FLAGS['logtostderr'].value:
      self._log_to_stderr(record)
    else:
      super(PythonHandler, self).emit(record)
      stderr_threshold = converter.string_to_standard(
          FLAGS['stderrthreshold'].value)
      if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
          self.stream != sys.stderr):
        self._log_to_stderr(record)
    # Die when the record is created from ABSLLogger and level is FATAL.
    if _is_absl_fatal_record(record):
      self.flush()  # Flush the log before dying.

      # In threaded python, sys.exit() from a non-main thread only
      # exits the thread in question.
      os.abort()

  def close(self):
    """Closes the stream to which we are writing."""
    self.acquire()
    try:
      self.flush()
      try:
        # Do not close the stream if it's sys.stderr|stdout. They may be
        # redirected or overridden to files, which should be managed by users
        # explicitly.
        user_managed = sys.stderr, sys.stdout, sys.__stderr__, sys.__stdout__
        if self.stream not in user_managed and (
            not hasattr(self.stream, 'isatty') or not self.stream.isatty()):
          self.stream.close()
      except ValueError:
        # A ValueError is thrown if we try to run isatty() on a closed file.
        pass
      super(PythonHandler, self).close()
    finally:
      self.release()


class ABSLHandler(logging.Handler):
  """Abseil Python logging module's log handler."""

  def __init__(self, python_logging_formatter):
    super(ABSLHandler, self).__init__()

    self._python_handler = PythonHandler(formatter=python_logging_formatter)
    self.activate_python_handler()

  def format(self, record):
    return self._current_handler.format(record)

  def setFormatter(self, fmt):
    self._current_handler.setFormatter(fmt)

  def emit(self, record):
    self._current_handler.emit(record)

  def flush(self):
    self._current_handler.flush()

  def close(self):
    super(ABSLHandler, self).close()
    self._current_handler.close()

  def handle(self, record):
    rv = self.filter(record)
    if rv:
      return self._current_handler.handle(record)
    return rv

  @property
  def python_handler(self):
    return self._python_handler

  def activate_python_handler(self):
    """Uses the Python logging handler as the current logging handler."""
    self._current_handler = self._python_handler

  def use_absl_log_file(self, program_name=None, log_dir=None):
    self._current_handler.use_absl_log_file(program_name, log_dir)

  def start_logging_to_file(self, program_name=None, log_dir=None):
    self._current_handler.start_logging_to_file(program_name, log_dir)


class PythonFormatter(logging.Formatter):
  """Formatter class used by :class:`~absl.logging.PythonHandler`."""

  def format(self, record):
    """Appends the message from the record to the results of the prefix.

    Args:
      record: logging.LogRecord, the record to be formatted.

    Returns:
      The formatted string representing the record.
    """
    if (not FLAGS['showprefixforinfo'].value and
        FLAGS['verbosity'].value == converter.ABSL_INFO and
        record.levelno == logging.INFO and
        _absl_handler.python_handler.stream == sys.stderr):
      prefix = ''
    else:
      prefix = get_absl_log_prefix(record)
    return prefix + super(PythonFormatter, self).format(record)


class ABSLLogger(logging.getLoggerClass()):
  """A logger that will create LogRecords while skipping some stack frames.

  This class maintains an internal list of filenames and method names
  for use when determining who called the currently executing stack
  frame.  Any method names from specific source files are skipped when
  walking backwards through the stack.

  Client code should use the register_frame_to_skip method to let the
  ABSLLogger know which method from which file should be
  excluded from the walk backwards through the stack.
  """
  _frames_to_skip = set()

  def findCaller(self, stack_info=False, stacklevel=1):
    """Finds the frame of the calling method on the stack.

    This method skips any frames registered with the
    ABSLLogger and any methods from this file, and whatever
    method is currently being used to generate the prefix for the log
    line.  Then it returns the file name, line number, and method name
    of the calling method.  An optional fourth item may be returned;
    callers who only need things from the first three are advised to
    always slice or index the result rather than using direct unpacking
    assignment.

    Args:
      stack_info: bool, when True, include the stack trace as a fourth item
          returned.  On Python 3 there are always four items returned - the
          fourth will be None when this is False.  On Python 2 the stdlib
          base class API only returns three items.  We do the same when this
          new parameter is unspecified or False for compatibility.

    Returns:
      (filename, lineno, methodname[, sinfo]) of the calling method.
    """
    f_to_skip = ABSLLogger._frames_to_skip
    # Use sys._getframe(2) instead of logging.currentframe(); it's slightly
    # faster because there is one less frame to traverse.
    frame = sys._getframe(2)  # pylint: disable=protected-access

    while frame:
      code = frame.f_code
      if (_LOGGING_FILE_PREFIX not in code.co_filename and
          (code.co_filename, code.co_name,
           code.co_firstlineno) not in f_to_skip and
          (code.co_filename, code.co_name) not in f_to_skip):
        sinfo = None
        if stack_info:
          out = io.StringIO()
          out.write(u'Stack (most recent call last):\n')
          traceback.print_stack(frame, file=out)
          sinfo = out.getvalue().rstrip(u'\n')
        return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
      frame = frame.f_back

  def critical(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``CRITICAL``."""
    self.log(logging.CRITICAL, msg, *args, **kwargs)

  def fatal(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``FATAL``."""
    self.log(logging.FATAL, msg, *args, **kwargs)

  def error(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``ERROR``."""
    self.log(logging.ERROR, msg, *args, **kwargs)

  def warn(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``WARN``."""
    warnings.warn("The 'warn' method is deprecated, use 'warning' instead",
                  DeprecationWarning, 2)
    self.log(logging.WARN, msg, *args, **kwargs)

  def warning(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``WARNING``."""
    self.log(logging.WARNING, msg, *args, **kwargs)

  def info(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``INFO``."""
    self.log(logging.INFO, msg, *args, **kwargs)

  def debug(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``DEBUG``."""
    self.log(logging.DEBUG, msg, *args, **kwargs)

  def log(self, level, msg, *args, **kwargs):
    """Logs a message at a certain level substituting in the supplied arguments.

    This method behaves differently in python and c++ modes.

    Args:
      level: int, the standard logging level at which to log the message.
      msg: str, the text of the message to log.
      *args: The arguments to substitute in the message.
      **kwargs: The keyword arguments to substitute in the message.
    """
    if level >= logging.FATAL:
      # Add property to the LogRecord created by this logger.
      # This will be used by the ABSLHandler to determine whether it should
      # treat CRITICAL/FATAL logs as really FATAL.
      extra = kwargs.setdefault('extra', {})
      extra[_ABSL_LOG_FATAL] = True
    super(ABSLLogger, self).log(level, msg, *args, **kwargs)

  def handle(self, record):
    """Calls handlers without checking ``Logger.disabled``.

    Non-root loggers are set to disabled after setup with :func:`logging.config`
    if it's not explicitly specified. Historically, absl logging is not
    disabled by that. To maintain this behavior, this function skips checking
    the ``Logger.disabled`` bit.

    This logger can still be disabled by adding a filter that filters out
    everything.

    Args:
      record: logging.LogRecord, the record to handle.
    """
    if self.filter(record):
      self.callHandlers(record)

  @classmethod
  def register_frame_to_skip(cls, file_name, function_name, line_number=None):
    """Registers a function name to skip when walking the stack.

    The :class:`~absl.logging.ABSLLogger` sometimes skips method calls on the
    stack to make the log messages meaningful in their appropriate context.
    This method registers a function from a particular file as one
    which should be skipped.

    Args:
      file_name: str, the name of the file that contains the function.
      function_name: str, the name of the function to skip.
      line_number: int, if provided, only the function with this starting line
          number will be skipped. Otherwise, all functions with the same name
          in the file will be skipped.
    """
    if line_number is not None:
      cls._frames_to_skip.add((file_name, function_name, line_number))
    else:
      cls._frames_to_skip.add((file_name, function_name))


def _get_thread_id():
  """Gets id of current thread, suitable for logging as an unsigned quantity.

  If pywrapbase is linked, returns GetTID() for the thread ID to be
  consistent with C++ logging.  Otherwise, returns the numeric thread id.
  The quantities are made unsigned by masking with 2*sys.maxint + 1.

  Returns:
    Thread ID unique to this process (unsigned)
  """
  thread_id = threading.get_ident()
  return thread_id & _THREAD_ID_MASK


def get_absl_logger():
  """Returns the absl logger instance."""
  return _absl_logger


def get_absl_handler():
  """Returns the absl handler instance."""
  return _absl_handler


def use_python_logging(quiet=False):
  """Uses the python implementation of the logging code.

  Args:
    quiet: bool, if True, do not log a message about switching logging type.
  """
  get_absl_handler().activate_python_handler()
  if not quiet:
    info('Restoring pure python logging')


_attempted_to_remove_stderr_stream_handlers = False


def use_absl_handler():
  """Uses the ABSL logging handler for logging.

  This method is called in :func:`app.run()<absl.app.run>` so the absl handler
  is used in absl apps.
  """
  global _attempted_to_remove_stderr_stream_handlers
  if not _attempted_to_remove_stderr_stream_handlers:
    # The absl handler logs to stderr by default. To prevent double logging to
    # stderr, the following code tries its best to remove other handlers that
    # emit to stderr. Those handlers are most commonly added when
    # logging.info/debug is called before calling use_absl_handler().
    handlers = [
        h for h in logging.root.handlers
        if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
    for h in handlers:
      logging.root.removeHandler(h)
    _attempted_to_remove_stderr_stream_handlers = True

  absl_handler = get_absl_handler()
  if absl_handler not in logging.root.handlers:
    logging.root.addHandler(absl_handler)
    FLAGS['verbosity']._update_logging_levels()  # pylint: disable=protected-access
    FLAGS['logger_levels']._update_logger_levels()  # pylint: disable=protected-access

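# A brief, illustrative sketch for apps that do not use absl.app.run() (which
# already performs these steps): parse flags, then install the absl handler so
# subsequent logging calls are formatted by absl. Client-side usage only.
#
#   from absl import flags, logging
#   import sys
#
#   flags.FLAGS(sys.argv)        # Parse flags without absl.app.run().
#   logging.use_absl_handler()
#   logging.info('Now handled by the absl handler.')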

def _initialize():
  """Initializes loggers and handlers."""
  global _absl_logger, _absl_handler

  if _absl_logger:
    return

  original_logger_class = logging.getLoggerClass()
  logging.setLoggerClass(ABSLLogger)
  _absl_logger = logging.getLogger('absl')
  logging.setLoggerClass(original_logger_class)

  python_logging_formatter = PythonFormatter()
  _absl_handler = ABSLHandler(python_logging_formatter)


_initialize()