/*
** Copyright 2014, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#include <ctype.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
#include <sys/_system_properties.h>
#include <unistd.h>

#include <private/android_logger.h>

#include "log_portability.h"

static pthread_mutex_t lock_loggable = PTHREAD_MUTEX_INITIALIZER;

static int lock() {
  /*
   * If we trigger a signal handler in the middle of locked activity and the
   * signal handler logs a message, we could get into a deadlock state.
   */
  /*
   * Under any contention we can fall back to the non-cached method in less
   * time than the system call a contended mutex would cost, so trylock is
   * sufficient here.
   */
  return pthread_mutex_trylock(&lock_loggable);
}

static void unlock() {
  pthread_mutex_unlock(&lock_loggable);
}

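/*
 * Cache a system property lookup: the prop_info handle plus the serial
 * number observed at the last read, so changes can be detected cheaply
 * without searching the property area again.
 */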
struct cache {
  const prop_info* pinfo;
  uint32_t serial;
};

struct cache_char {
  struct cache cache;
  unsigned char c;
};

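/* Returns nonzero if the cached property has changed since the last refresh. */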
static int check_cache(struct cache* cache) {
  return cache->pinfo && __system_property_serial(cache->pinfo) != cache->serial;
}

#define BOOLEAN_TRUE 0xFF
#define BOOLEAN_FALSE 0xFE

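/*
 * Re-read the property behind the cache and record its serial number.  The
 * values "true" and "false" (any case) are canonicalized to the
 * BOOLEAN_TRUE/BOOLEAN_FALSE sentinels; otherwise only the first character
 * of the value is kept.
 */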
static void refresh_cache(struct cache_char* cache, const char* key) {
  char buf[PROP_VALUE_MAX];

  if (!cache->cache.pinfo) {
    cache->cache.pinfo = __system_property_find(key);
    if (!cache->cache.pinfo) {
      return;
    }
  }
  cache->cache.serial = __system_property_serial(cache->cache.pinfo);
  __system_property_read(cache->cache.pinfo, 0, buf);
  switch (buf[0]) {
    case 't':
    case 'T':
      cache->c = strcasecmp(buf + 1, "rue") ? buf[0] : BOOLEAN_TRUE;
      break;
    case 'f':
    case 'F':
      cache->c = strcasecmp(buf + 1, "alse") ? buf[0] : BOOLEAN_FALSE;
      break;
    default:
      cache->c = buf[0];
  }
}

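/*
 * Resolve the minimum priority for a tag by consulting, in order,
 * log.tag.<tag>, persist.log.tag.<tag>, log.tag and persist.log.tag,
 * falling back to default_prio when none of them yields a usable level.
 */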
static int __android_log_level(const char* tag, size_t len, int default_prio) {
  /* sizeof() is used on this array below */
  static const char log_namespace[] = "persist.log.tag.";
  static const size_t base_offset = 8; /* skip "persist." */
  /* calculate the size of our key temporary buffer */
  const size_t taglen = tag ? len : 0;
  /* sizeof(log_namespace) = strlen(log_namespace) + 1 */
  char key[sizeof(log_namespace) + taglen];
  char* kp;
  size_t i;
  char c = 0;
  /*
   * Single layer cache of four properties. Priorities are:
   *    log.tag.<tag>
   *    persist.log.tag.<tag>
   *    log.tag
   *    persist.log.tag
   * Where the missing tag matches all tags and becomes the
   * system global default. We do not support ro.log.tag* .
   */
  static char* last_tag;
  static size_t last_tag_len;
  static uint32_t global_serial;
  /* some compilers erroneously see uninitialized use. !not_locked */
  uint32_t current_global_serial = 0;
  static struct cache_char tag_cache[2];
  static struct cache_char global_cache[2];
  int change_detected;
  int global_change_detected;
  int not_locked;

  strcpy(key, log_namespace);

  global_change_detected = change_detected = not_locked = lock();

  if (!not_locked) {
    /*
     * Check all known serial numbers for changes.
     */
    for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
      if (check_cache(&tag_cache[i].cache)) {
        change_detected = 1;
      }
    }
    for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
      if (check_cache(&global_cache[i].cache)) {
        global_change_detected = 1;
      }
    }

    current_global_serial = __system_property_area_serial();
    if (current_global_serial != global_serial) {
      change_detected = 1;
      global_change_detected = 1;
    }
  }

  if (taglen) {
    int local_change_detected = change_detected;
    if (!not_locked) {
      if (!last_tag || !last_tag[0] || (last_tag[0] != tag[0]) ||
          strncmp(last_tag + 1, tag + 1, last_tag_len - 1)) {
        /* invalidate log.tag.<tag> cache */
        for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
          tag_cache[i].cache.pinfo = NULL;
          tag_cache[i].c = '\0';
        }
        if (last_tag) last_tag[0] = '\0';
        local_change_detected = 1;
      }
      if (!last_tag || !last_tag[0]) {
        if (!last_tag) {
          last_tag = calloc(1, len + 1);
          last_tag_len = 0;
          if (last_tag) last_tag_len = len + 1;
        } else if (len >= last_tag_len) {
          last_tag = realloc(last_tag, len + 1);
          last_tag_len = 0;
          if (last_tag) last_tag_len = len + 1;
        }
        if (last_tag) {
          strncpy(last_tag, tag, len);
          last_tag[len] = '\0';
        }
      }
    }
    strncpy(key + sizeof(log_namespace) - 1, tag, len);
    key[sizeof(log_namespace) - 1 + len] = '\0';

    kp = key;
    for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
      struct cache_char* cache = &tag_cache[i];
      struct cache_char temp_cache;

      if (not_locked) {
        temp_cache.cache.pinfo = NULL;
        temp_cache.c = '\0';
        cache = &temp_cache;
      }
      if (local_change_detected) {
        refresh_cache(cache, kp);
      }

      if (cache->c) {
        c = cache->c;
        break;
      }

      kp = key + base_offset;
    }
  }

  switch (toupper(c)) { /* if invalid, resort to global */
    case 'V':
    case 'D':
    case 'I':
    case 'W':
    case 'E':
    case 'F': /* Not officially supported */
    case 'A':
    case 'S':
    case BOOLEAN_FALSE: /* Not officially supported */
      break;
    default:
      /* clear '.' after log.tag */
      key[sizeof(log_namespace) - 2] = '\0';

      kp = key;
      for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
        struct cache_char* cache = &global_cache[i];
        struct cache_char temp_cache;

        if (not_locked) {
          temp_cache = *cache;
          if (temp_cache.cache.pinfo != cache->cache.pinfo) { /* check atomic */
            temp_cache.cache.pinfo = NULL;
            temp_cache.c = '\0';
          }
          cache = &temp_cache;
        }
        if (global_change_detected) {
          refresh_cache(cache, kp);
        }

        if (cache->c) {
          c = cache->c;
          break;
        }

        kp = key + base_offset;
      }
      break;
  }

  if (!not_locked) {
    global_serial = current_global_serial;
    unlock();
  }

  switch (toupper(c)) {
    /* clang-format off */
    case 'V': return ANDROID_LOG_VERBOSE;
    case 'D': return ANDROID_LOG_DEBUG;
    case 'I': return ANDROID_LOG_INFO;
    case 'W': return ANDROID_LOG_WARN;
    case 'E': return ANDROID_LOG_ERROR;
    case 'F': /* FALLTHRU */ /* Not officially supported */
    case 'A': return ANDROID_LOG_FATAL;
    case BOOLEAN_FALSE: /* FALLTHRU */ /* Not officially supported */
    case 'S': return -1; /* ANDROID_LOG_SUPPRESS */
    /* clang-format on */
  }
  return default_prio;
}

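/*
 * Returns nonzero if a message of priority prio with the given tag (and
 * explicit tag length) should be logged, honoring per-tag and global
 * log.tag overrides.
 */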
LIBLOG_ABI_PUBLIC int __android_log_is_loggable_len(int prio, const char* tag,
                                                    size_t len,
                                                    int default_prio) {
  int logLevel = __android_log_level(tag, len, default_prio);
  return logLevel >= 0 && prio >= logLevel;
}

LIBLOG_ABI_PUBLIC int __android_log_is_loggable(int prio, const char* tag,
                                                int default_prio) {
  int logLevel =
      __android_log_level(tag, (tag && *tag) ? strlen(tag) : 0, default_prio);
  return logLevel >= 0 && prio >= logLevel;
}

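/*
 * Returns nonzero when ro.debuggable is set to '1'.  Read-only properties
 * never change once set, so the answer is cached after the first
 * successful read.
 */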
LIBLOG_ABI_PUBLIC int __android_log_is_debuggable() {
  static uint32_t serial;
  static struct cache_char tag_cache;
  static const char key[] = "ro.debuggable";
  int ret;

  if (tag_cache.c) { /* ro property does not change after set */
    ret = tag_cache.c == '1';
  } else if (lock()) {
    struct cache_char temp_cache = { { NULL, -1 }, '\0' };
    refresh_cache(&temp_cache, key);
    ret = temp_cache.c == '1';
  } else {
    int change_detected = check_cache(&tag_cache.cache);
    uint32_t current_serial = __system_property_area_serial();
    if (current_serial != serial) {
      change_detected = 1;
    }
    if (change_detected) {
      refresh_cache(&tag_cache, key);
      serial = current_serial;
    }
    ret = tag_cache.c == '1';

    unlock();
  }

  return ret;
}

/*
 * For properties that are read often, but generally remain constant.
 * Since a change is rare, we will accept a trylock failure gracefully.
 * Use a separate lock from is_loggable to keep contention down b/25563384.
 */
struct cache2_char {
  pthread_mutex_t lock;
  uint32_t serial;
  const char* key_persist;
  struct cache_char cache_persist;
  const char* key_ro;
  struct cache_char cache_ro;
  unsigned char (*const evaluate)(const struct cache2_char* self);
};

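/*
 * Refresh both cached properties when anything changed, then evaluate.
 * On lock contention we evaluate the possibly stale cached values without
 * refreshing, accepting the race noted above.
 */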
static inline unsigned char do_cache2_char(struct cache2_char* self) {
  uint32_t current_serial;
  int change_detected;
  unsigned char c;

  if (pthread_mutex_trylock(&self->lock)) {
    /* We are willing to accept some race in this context */
    return self->evaluate(self);
  }

  change_detected = check_cache(&self->cache_persist.cache) ||
                    check_cache(&self->cache_ro.cache);
  current_serial = __system_property_area_serial();
  if (current_serial != self->serial) {
    change_detected = 1;
  }
  if (change_detected) {
    refresh_cache(&self->cache_persist, self->key_persist);
    refresh_cache(&self->cache_ro, self->key_ro);
    self->serial = current_serial;
  }
  c = self->evaluate(self);

  pthread_mutex_unlock(&self->lock);

  return c;
}

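/* The persist.* setting, when present, overrides the ro.* default. */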
static unsigned char evaluate_persist_ro(const struct cache2_char* self) {
  unsigned char c = self->cache_persist.c;

  if (c) {
    return c;
  }

  return self->cache_ro.c;
}

/*
 * Timestamp state generally remains constant, but can change at any time
 * to handle developer requirements.
 */
LIBLOG_ABI_PUBLIC clockid_t android_log_clockid() {
  static struct cache2_char clockid = {
    PTHREAD_MUTEX_INITIALIZER, 0,
    "persist.logd.timestamp",  { { NULL, -1 }, '\0' },
    "ro.logd.timestamp",       { { NULL, -1 }, '\0' },
    evaluate_persist_ro
  };

  return (tolower(do_cache2_char(&clockid)) == 'm') ? CLOCK_MONOTONIC
                                                    : CLOCK_REALTIME;
}

/*
 * Security state generally remains constant, but the Device Owner (DO)
 * must be able to turn off logging should it become spammy after an
 * attack is detected.
 */
static unsigned char evaluate_security(const struct cache2_char* self) {
  unsigned char c = self->cache_ro.c;

  return (c != BOOLEAN_FALSE) && c && (self->cache_persist.c == BOOLEAN_TRUE);
}

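/*
 * Security logging is enabled only when a device owner is provisioned
 * (ro.device_owner set and not "false") and persist.logd.security is true.
 */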
LIBLOG_ABI_PUBLIC int __android_log_security() {
  static struct cache2_char security = {
    PTHREAD_MUTEX_INITIALIZER, 0,
    "persist.logd.security",   { { NULL, -1 }, BOOLEAN_FALSE },
    "ro.device_owner",         { { NULL, -1 }, BOOLEAN_FALSE },
    evaluate_security
  };

  return do_cache2_char(&security);
}

/*
 * Interface that represents the logd buffer size determination so that others
 * need not guess our intentions.
 */

/* Property helper */
static bool check_flag(const char* prop, const char* flag) {
  const char* cp = strcasestr(prop, flag);
  if (!cp) {
    return false;
  }
  /* We will only document comma (,) */
  static const char sep[] = ",:;|+ \t\f";
  if ((cp != prop) && !strchr(sep, cp[-1])) {
    return false;
  }
  cp += strlen(flag);
  return !*cp || !!strchr(sep, *cp);
}

/* cache structure */
struct cache_property {
  struct cache cache;
  char property[PROP_VALUE_MAX];
};

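/* Like refresh_cache(), but keeps the full property value, not just one character. */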
static void refresh_cache_property(struct cache_property* cache,
                                   const char* key) {
  if (!cache->cache.pinfo) {
    cache->cache.pinfo = __system_property_find(key);
    if (!cache->cache.pinfo) {
      return;
    }
  }
  cache->cache.serial = __system_property_serial(cache->cache.pinfo);
  __system_property_read(cache->cache.pinfo, 0, cache->property);
}

/* get boolean with the logger twist that supports eng adjustments */
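/*
 * With BOOL_DEFAULT_FLAG_PERSIST set, ro.<key> and persist.<key> are read
 * first and the unprefixed <key>, when it exists, overrides them.  The
 * special values "eng" and "svelte" adjust the default result rather than
 * answering true/false directly.
 */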
LIBLOG_ABI_PRIVATE bool __android_logger_property_get_bool(const char* key,
                                                           int flag) {
  struct cache_property property = { { NULL, -1 }, { 0 } };
  if (flag & BOOL_DEFAULT_FLAG_PERSIST) {
    char newkey[strlen("persist.") + strlen(key) + 1];
    snprintf(newkey, sizeof(newkey), "ro.%s", key);
    refresh_cache_property(&property, newkey);
    property.cache.pinfo = NULL;
    property.cache.serial = -1;
    snprintf(newkey, sizeof(newkey), "persist.%s", key);
    refresh_cache_property(&property, newkey);
    property.cache.pinfo = NULL;
    property.cache.serial = -1;
  }

  refresh_cache_property(&property, key);

  if (check_flag(property.property, "true")) {
    return true;
  }
  if (check_flag(property.property, "false")) {
    return false;
  }
  if (property.property[0]) {
    flag &= ~(BOOL_DEFAULT_FLAG_ENG | BOOL_DEFAULT_FLAG_SVELTE);
  }
  if (check_flag(property.property, "eng")) {
    flag |= BOOL_DEFAULT_FLAG_ENG;
  }
  /* this is really a "not" flag */
  if (check_flag(property.property, "svelte")) {
    flag |= BOOL_DEFAULT_FLAG_SVELTE;
  }

  /* Sanity Check */
  if (flag & (BOOL_DEFAULT_FLAG_SVELTE | BOOL_DEFAULT_FLAG_ENG)) {
    flag &= ~BOOL_DEFAULT_FLAG_TRUE_FALSE;
    flag |= BOOL_DEFAULT_TRUE;
  }

  if ((flag & BOOL_DEFAULT_FLAG_SVELTE) &&
      __android_logger_property_get_bool("ro.config.low_ram",
                                         BOOL_DEFAULT_FALSE)) {
    return false;
  }
  if ((flag & BOOL_DEFAULT_FLAG_ENG) && !__android_log_is_debuggable()) {
    return false;
  }

  return (flag & BOOL_DEFAULT_FLAG_TRUE_FALSE) != BOOL_DEFAULT_FALSE;
}

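/*
 * A buffer size is valid if it lies within [LOG_BUFFER_MIN_SIZE,
 * LOG_BUFFER_MAX_SIZE] and, when that limit can be computed and is itself
 * in range, does not exceed roughly 3% of physical memory.
 */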
LIBLOG_ABI_PRIVATE bool __android_logger_valid_buffer_size(unsigned long value) {
  static long pages, pagesize;
  unsigned long maximum;

  if ((value < LOG_BUFFER_MIN_SIZE) || (LOG_BUFFER_MAX_SIZE < value)) {
    return false;
  }

  if (!pages) {
    pages = sysconf(_SC_PHYS_PAGES);
  }
  if (pages < 1) {
    return true;
  }

  if (!pagesize) {
    pagesize = sysconf(_SC_PAGESIZE);
    if (pagesize <= 1) {
      pagesize = PAGE_SIZE;
    }
  }

  /* maximum memory impact a somewhat arbitrary ~3% */
  pages = (pages + 31) / 32;
  maximum = pages * pagesize;

  if ((maximum < LOG_BUFFER_MIN_SIZE) || (LOG_BUFFER_MAX_SIZE < maximum)) {
    return true;
  }

  return value <= maximum;
}

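/* Like cache2_char, but caches full property values for size parsing. */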
struct cache2_property_size {
  pthread_mutex_t lock;
  uint32_t serial;
  const char* key_persist;
  struct cache_property cache_persist;
  const char* key_ro;
  struct cache_property cache_ro;
  unsigned long (*const evaluate)(const struct cache2_property_size* self);
};

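/* Same trylock/refresh/evaluate pattern as do_cache2_char(), for size properties. */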
static inline unsigned long do_cache2_property_size(
    struct cache2_property_size* self) {
  uint32_t current_serial;
  int change_detected;
  unsigned long v;

  if (pthread_mutex_trylock(&self->lock)) {
    /* We are willing to accept some race in this context */
    return self->evaluate(self);
  }

  change_detected = check_cache(&self->cache_persist.cache) ||
                    check_cache(&self->cache_ro.cache);
  current_serial = __system_property_area_serial();
  if (current_serial != self->serial) {
    change_detected = 1;
  }
  if (change_detected) {
    refresh_cache_property(&self->cache_persist, self->key_persist);
    refresh_cache_property(&self->cache_ro, self->key_ro);
    self->serial = current_serial;
  }
  v = self->evaluate(self);

  pthread_mutex_unlock(&self->lock);

  return v;
}

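/*
 * Parse a size with an optional K/M suffix (e.g. "64K" or "1m"); a
 * malformed or out-of-range value yields 0.
 */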
static unsigned long property_get_size_from_cache(
    const struct cache_property* cache) {
  char* cp;
  unsigned long value = strtoul(cache->property, &cp, 10);

  switch (*cp) {
    case 'm':
    case 'M':
      value *= 1024;
    /* FALLTHRU */
    case 'k':
    case 'K':
      value *= 1024;
    /* FALLTHRU */
    case '\0':
      break;

    default:
      value = 0;
  }

  if (!__android_logger_valid_buffer_size(value)) {
    value = 0;
  }

  return value;
}

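/* A valid persist.* size takes precedence over the ro.* default. */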
static unsigned long evaluate_property_get_size(
    const struct cache2_property_size* self) {
  unsigned long size = property_get_size_from_cache(&self->cache_persist);
  if (size) {
    return size;
  }
  return property_get_size_from_cache(&self->cache_ro);
}

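/*
 * Determine the buffer size for logId: a per-buffer override
 * (persist.logd.size.<buffer> / ro.logd.size.<buffer>) wins over the
 * global settings, which in turn fall back to a built-in default.
 */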
LIBLOG_ABI_PRIVATE unsigned long __android_logger_get_buffer_size(log_id_t logId) {
  static const char global_tunable[] = "persist.logd.size"; /* Settings App */
  static const char global_default[] = "ro.logd.size";      /* BoardConfig.mk */
  static struct cache2_property_size global = {
    /* clang-format off */
    PTHREAD_MUTEX_INITIALIZER, 0,
    global_tunable, { { NULL, -1 }, {} },
    global_default, { { NULL, -1 }, {} },
    evaluate_property_get_size
    /* clang-format on */
  };
  char key_persist[strlen(global_tunable) + strlen(".security") + 1];
  char key_ro[strlen(global_default) + strlen(".security") + 1];
  struct cache2_property_size local = {
    /* clang-format off */
    PTHREAD_MUTEX_INITIALIZER, 0,
    key_persist, { { NULL, -1 }, {} },
    key_ro,      { { NULL, -1 }, {} },
    evaluate_property_get_size
    /* clang-format on */
  };
  unsigned long property_size, default_size;

  default_size = do_cache2_property_size(&global);
  if (!default_size) {
    default_size = __android_logger_property_get_bool("ro.config.low_ram",
                                                      BOOL_DEFAULT_FALSE)
                       ? LOG_BUFFER_MIN_SIZE /* 64K  */
                       : LOG_BUFFER_SIZE;    /* 256K */
  }

  snprintf(key_persist, sizeof(key_persist), "%s.%s", global_tunable,
           android_log_id_to_name(logId));
  snprintf(key_ro, sizeof(key_ro), "%s.%s", global_default,
           android_log_id_to_name(logId));
  property_size = do_cache2_property_size(&local);

  if (!property_size) {
    property_size = default_size;
  }

  if (!property_size) {
    property_size = LOG_BUFFER_SIZE;
  }

  return property_size;
}