1 /*
2 ** Copyright 2014, The Android Open Source Project
3 **
4 ** Licensed under the Apache License, Version 2.0 (the "License");
5 ** you may not use this file except in compliance with the License.
6 ** You may obtain a copy of the License at
7 **
8 ** http://www.apache.org/licenses/LICENSE-2.0
9 **
10 ** Unless required by applicable law or agreed to in writing, software
11 ** distributed under the License is distributed on an "AS IS" BASIS,
12 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 ** See the License for the specific language governing permissions and
14 ** limitations under the License.
15 */
16
#include <log/log_properties.h>

#include <ctype.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
#include <sys/_system_properties.h>
#include <unistd.h>

#include <private/android_logger.h>

#include "log_portability.h"
30
/* Guards the property caches below; only ever taken with trylock. */
static pthread_mutex_t lock_loggable = PTHREAD_MUTEX_INITIALIZER;

/*
 * Try to take the cache lock without ever blocking; returns non-zero on
 * contention.
 *
 * Blocking here would be dangerous: a signal handler that logs while the
 * lock is held would deadlock.  It is also unnecessary: under contention,
 * falling back to the uncached property lookup costs less than the futex
 * system call needed to wait on the mutex.
 */
static int lock() {
  return pthread_mutex_trylock(&lock_loggable);
}
45
unlock()46 static void unlock() {
47 pthread_mutex_unlock(&lock_loggable);
48 }
49
/* Tracks one system property and the serial number observed when it was
 * last read, so staleness can be detected without re-reading the value. */
struct cache {
  const prop_info* pinfo;
  uint32_t serial;
};

/* A cached property reduced to a single character: the first character of
 * its value, or one of the BOOLEAN_* sentinels below; '\0' means unset. */
struct cache_char {
  struct cache cache;
  unsigned char c;
};
59
check_cache(struct cache * cache)60 static int check_cache(struct cache* cache) {
61 return cache->pinfo && __system_property_serial(cache->pinfo) != cache->serial;
62 }
63
/* Out-of-band sentinels stored in cache_char.c when a property's value is a
 * full "true"/"false" string; they cannot collide with the first character
 * of any property value. */
#define BOOLEAN_TRUE 0xFF
#define BOOLEAN_FALSE 0xFE
66
refresh_cache(struct cache_char * cache,const char * key)67 static void refresh_cache(struct cache_char* cache, const char* key) {
68 char buf[PROP_VALUE_MAX];
69
70 if (!cache->cache.pinfo) {
71 cache->cache.pinfo = __system_property_find(key);
72 if (!cache->cache.pinfo) {
73 return;
74 }
75 }
76 cache->cache.serial = __system_property_serial(cache->cache.pinfo);
77 __system_property_read(cache->cache.pinfo, 0, buf);
78 switch (buf[0]) {
79 case 't':
80 case 'T':
81 cache->c = strcasecmp(buf + 1, "rue") ? buf[0] : BOOLEAN_TRUE;
82 break;
83 case 'f':
84 case 'F':
85 cache->c = strcasecmp(buf + 1, "alse") ? buf[0] : BOOLEAN_FALSE;
86 break;
87 default:
88 cache->c = buf[0];
89 }
90 }
91
/*
 * Map a tag (of length `len`) to its configured minimum log priority.
 * Properties are consulted in priority order: log.tag.<tag>,
 * persist.log.tag.<tag>, log.tag, persist.log.tag; a tagless property acts
 * as the system-wide default.  Returns an ANDROID_LOG_* priority, -1 to
 * suppress, or default_prio when no property decides.  Lookups are cached;
 * on lock contention the caches are bypassed rather than blocking (see
 * lock() for why blocking is unacceptable).
 */
static int __android_log_level(const char* tag, size_t len, int default_prio) {
  /* sizeof() is used on this array below */
  static const char log_namespace[] = "persist.log.tag.";
  static const size_t base_offset = 8; /* skip "persist." */
  /* calculate the size of our key temporary buffer */
  const size_t taglen = tag ? len : 0;
  /* sizeof(log_namespace) = strlen(log_namespace) + 1 */
  char key[sizeof(log_namespace) + taglen];
  char* kp;
  size_t i;
  char c = 0;
  /*
   * Single layer cache of four properties. Priorities are:
   *    log.tag.<tag>
   *    persist.log.tag.<tag>
   *    log.tag
   *    persist.log.tag
   * Where the missing tag matches all tags and becomes the
   * system global default. We do not support ro.log.tag* .
   */
  static char* last_tag;
  static size_t last_tag_len;
  static uint32_t global_serial;
  /* some compilers erroneously see uninitialized use. !not_locked */
  uint32_t current_global_serial = 0;
  static struct cache_char tag_cache[2];
  static struct cache_char global_cache[2];
  int change_detected;
  int global_change_detected;
  int not_locked;

  strcpy(key, log_namespace);

  /* lock() returns non-zero on contention; that value doubles as the
   * initial "assume everything changed" state for the uncached path. */
  global_change_detected = change_detected = not_locked = lock();

  if (!not_locked) {
    /*
     * check all known serial numbers to changes.
     */
    for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
      if (check_cache(&tag_cache[i].cache)) {
        change_detected = 1;
      }
    }
    for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
      if (check_cache(&global_cache[i].cache)) {
        global_change_detected = 1;
      }
    }

    /* A bump of the property-area serial means any property may have
     * changed, including ones we never found and thus cannot check above. */
    current_global_serial = __system_property_area_serial();
    if (current_global_serial != global_serial) {
      change_detected = 1;
      global_change_detected = 1;
    }
  }

  if (taglen) {
    int local_change_detected = change_detected;
    if (!not_locked) {
      /* Different tag than the one cached: drop the per-tag caches.
       * NOTE(review): the strncmp bound of last_tag_len - 1 assumes
       * last_tag_len is stored-tag-length + 1, as maintained below. */
      if (!last_tag || !last_tag[0] || (last_tag[0] != tag[0]) ||
          strncmp(last_tag + 1, tag + 1, last_tag_len - 1)) {
        /* invalidate log.tag.<tag> cache */
        for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
          tag_cache[i].cache.pinfo = NULL;
          tag_cache[i].c = '\0';
        }
        if (last_tag) last_tag[0] = '\0';
        local_change_detected = 1;
      }
      /* Record the current tag, (re)allocating the persistent copy as
       * needed; allocation failure simply leaves last_tag NULL. */
      if (!last_tag || !last_tag[0]) {
        if (!last_tag) {
          last_tag = static_cast<char*>(calloc(1, len + 1));
          last_tag_len = 0;
          if (last_tag) last_tag_len = len + 1;
        } else if (len >= last_tag_len) {
          last_tag = static_cast<char*>(realloc(last_tag, len + 1));
          last_tag_len = 0;
          if (last_tag) last_tag_len = len + 1;
        }
        if (last_tag) {
          strncpy(last_tag, tag, len);
          last_tag[len] = '\0';
        }
      }
    }
    /* key now holds "persist.log.tag.<tag>"; key + base_offset skips the
     * "persist." prefix to yield "log.tag.<tag>". */
    strncpy(key + sizeof(log_namespace) - 1, tag, len);
    key[sizeof(log_namespace) - 1 + len] = '\0';

    kp = key;
    for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
      struct cache_char* cache = &tag_cache[i];
      struct cache_char temp_cache;

      if (not_locked) {
        /* Contended: read through a throwaway cache on the stack. */
        temp_cache.cache.pinfo = NULL;
        temp_cache.c = '\0';
        cache = &temp_cache;
      }
      if (local_change_detected) {
        refresh_cache(cache, kp);
      }

      if (cache->c) {
        c = cache->c;
        break;
      }

      kp = key + base_offset;
    }
  }

  switch (toupper(c)) { /* if invalid, resort to global */
    case 'V':
    case 'D':
    case 'I':
    case 'W':
    case 'E':
    case 'F': /* Not officially supported */
    case 'A':
    case 'S':
    case BOOLEAN_FALSE: /* Not officially supported */
      break;
    default:
      /* clear '.' after log.tag */
      key[sizeof(log_namespace) - 2] = '\0';

      kp = key;
      for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
        struct cache_char* cache = &global_cache[i];
        struct cache_char temp_cache;

        if (not_locked) {
          /* Unlocked racy snapshot of the shared cache; only trust it when
           * the re-read pinfo pointer matches (cheap atomicity check). */
          temp_cache = *cache;
          if (temp_cache.cache.pinfo != cache->cache.pinfo) { /* check atomic */
            temp_cache.cache.pinfo = NULL;
            temp_cache.c = '\0';
          }
          cache = &temp_cache;
        }
        if (global_change_detected) {
          refresh_cache(cache, kp);
        }

        if (cache->c) {
          c = cache->c;
          break;
        }

        kp = key + base_offset;
      }
      break;
  }

  if (!not_locked) {
    global_serial = current_global_serial;
    unlock();
  }

  switch (toupper(c)) {
    /* clang-format off */
    case 'V': return ANDROID_LOG_VERBOSE;
    case 'D': return ANDROID_LOG_DEBUG;
    case 'I': return ANDROID_LOG_INFO;
    case 'W': return ANDROID_LOG_WARN;
    case 'E': return ANDROID_LOG_ERROR;
    case 'F': /* FALLTHRU */ /* Not officially supported */
    case 'A': return ANDROID_LOG_FATAL;
    case BOOLEAN_FALSE: /* FALLTHRU */ /* Not Officially supported */
    case 'S': return -1; /* ANDROID_LOG_SUPPRESS */
    /* clang-format on */
  }
  return default_prio;
}
266
/*
 * Non-zero when a message at `prio` for `tag` (length `len`) should be
 * emitted: the resolved minimum level must not be "suppress" (negative)
 * and `prio` must meet it.
 */
int __android_log_is_loggable_len(int prio, const char* tag, size_t len, int default_prio) {
  int minimum = __android_log_level(tag, len, default_prio);
  if (minimum < 0) {
    return 0;
  }
  return prio >= minimum;
}
271
/* NUL-terminated-tag variant: measures the tag and resolves its level. */
int __android_log_is_loggable(int prio, const char* tag, int default_prio) {
  size_t taglen = 0;
  if (tag && tag[0]) {
    taglen = strlen(tag);
  }
  int minimum = __android_log_level(tag, taglen, default_prio);
  return (minimum >= 0) && (prio >= minimum);
}
276
__android_log_is_debuggable()277 int __android_log_is_debuggable() {
278 static uint32_t serial;
279 static struct cache_char tag_cache;
280 static const char key[] = "ro.debuggable";
281 int ret;
282
283 if (tag_cache.c) { /* ro property does not change after set */
284 ret = tag_cache.c == '1';
285 } else if (lock()) {
286 struct cache_char temp_cache = {{NULL, 0xFFFFFFFF}, '\0'};
287 refresh_cache(&temp_cache, key);
288 ret = temp_cache.c == '1';
289 } else {
290 int change_detected = check_cache(&tag_cache.cache);
291 uint32_t current_serial = __system_property_area_serial();
292 if (current_serial != serial) {
293 change_detected = 1;
294 }
295 if (change_detected) {
296 refresh_cache(&tag_cache, key);
297 serial = current_serial;
298 }
299 ret = tag_cache.c == '1';
300
301 unlock();
302 }
303
304 return ret;
305 }
306
307 /*
308 * For properties that are read often, but generally remain constant.
309 * Since a change is rare, we will accept a trylock failure gracefully.
310 * Use a separate lock from is_loggable to keep contention down b/25563384.
311 */
/* A persist.* override plus ro.* fallback pair of single-character property
 * caches, guarded by its own mutex (separate from lock_loggable to reduce
 * contention); `evaluate` combines the two cached values into the result. */
struct cache2_char {
  pthread_mutex_t lock;
  uint32_t serial;
  const char* key_persist;
  struct cache_char cache_persist;
  const char* key_ro;
  struct cache_char cache_ro;
  unsigned char (*const evaluate)(const struct cache2_char* self);
};
321
do_cache2_char(struct cache2_char * self)322 static inline unsigned char do_cache2_char(struct cache2_char* self) {
323 uint32_t current_serial;
324 int change_detected;
325 unsigned char c;
326
327 if (pthread_mutex_trylock(&self->lock)) {
328 /* We are willing to accept some race in this context */
329 return self->evaluate(self);
330 }
331
332 change_detected = check_cache(&self->cache_persist.cache) || check_cache(&self->cache_ro.cache);
333 current_serial = __system_property_area_serial();
334 if (current_serial != self->serial) {
335 change_detected = 1;
336 }
337 if (change_detected) {
338 refresh_cache(&self->cache_persist, self->key_persist);
339 refresh_cache(&self->cache_ro, self->key_ro);
340 self->serial = current_serial;
341 }
342 c = self->evaluate(self);
343
344 pthread_mutex_unlock(&self->lock);
345
346 return c;
347 }
348
evaluate_persist_ro(const struct cache2_char * self)349 static unsigned char evaluate_persist_ro(const struct cache2_char* self) {
350 unsigned char c = self->cache_persist.c;
351
352 if (c) {
353 return c;
354 }
355
356 return self->cache_ro.c;
357 }
358
359 /*
360 * Timestamp state generally remains constant, but can change at any time
361 * to handle developer requirements.
362 */
android_log_clockid()363 clockid_t android_log_clockid() {
364 static struct cache2_char clockid = {PTHREAD_MUTEX_INITIALIZER, 0,
365 "persist.logd.timestamp", {{NULL, 0xFFFFFFFF}, '\0'},
366 "ro.logd.timestamp", {{NULL, 0xFFFFFFFF}, '\0'},
367 evaluate_persist_ro};
368
369 return (tolower(do_cache2_char(&clockid)) == 'm') ? CLOCK_MONOTONIC : CLOCK_REALTIME;
370 }
371
372 /*
373 * Security state generally remains constant, but the DO must be able
374 * to turn off logging should it become spammy after an attack is detected.
375 */
evaluate_security(const struct cache2_char * self)376 static unsigned char evaluate_security(const struct cache2_char* self) {
377 unsigned char c = self->cache_ro.c;
378
379 return (c != BOOLEAN_FALSE) && c && (self->cache_persist.c == BOOLEAN_TRUE);
380 }
381
__android_log_security()382 int __android_log_security() {
383 static struct cache2_char security = {
384 PTHREAD_MUTEX_INITIALIZER, 0,
385 "persist.logd.security", {{NULL, 0xFFFFFFFF}, BOOLEAN_FALSE},
386 "ro.device_owner", {{NULL, 0xFFFFFFFF}, BOOLEAN_FALSE},
387 evaluate_security};
388
389 return do_cache2_char(&security);
390 }
391
392 /*
393 * Interface that represents the logd buffer size determination so that others
394 * need not guess our intentions.
395 */
396
397 /* Property helper */
/*
 * True when `flag` appears as a whole, separator-delimited word inside the
 * property value `prop` (case-insensitive).  Only the comma separator is
 * documented; the others are tolerated.
 */
static bool check_flag(const char* prop, const char* flag) {
  /* We only will document comma (,) */
  static const char sep[] = ",:;|+ \t\f";

  const char* found = strcasestr(prop, flag);
  if (found == NULL) {
    return false;
  }
  /* must start the string or follow a separator */
  if ((found != prop) && (strchr(sep, found[-1]) == NULL)) {
    return false;
  }
  /* must end the string or be followed by a separator */
  char after = found[strlen(flag)];
  return (after == '\0') || (strchr(sep, after) != NULL);
}
411
412 /* cache structure */
/* Cached property that keeps a copy of its full value string (rather than
 * the single-character reduction used by cache_char). */
struct cache_property {
  struct cache cache;
  char property[PROP_VALUE_MAX];
};
417
refresh_cache_property(struct cache_property * cache,const char * key)418 static void refresh_cache_property(struct cache_property* cache, const char* key) {
419 if (!cache->cache.pinfo) {
420 cache->cache.pinfo = __system_property_find(key);
421 if (!cache->cache.pinfo) {
422 return;
423 }
424 }
425 cache->cache.serial = __system_property_serial(cache->cache.pinfo);
426 __system_property_read(cache->cache.pinfo, 0, cache->property);
427 }
428
429 /* get boolean with the logger twist that supports eng adjustments */
/* get boolean with the logger twist that supports eng adjustments */
/*
 * Read `key` as a boolean with logger-specific extensions.  Besides
 * "true"/"false", the value may contain the flags "eng" (only true on
 * debuggable builds) and "svelte" (false on low-ram devices).  `flag`
 * supplies the default plus BOOL_DEFAULT_FLAG_* modifiers; with
 * BOOL_DEFAULT_FLAG_PERSIST the ro.<key> and persist.<key> variants are
 * also consulted (later reads overwrite earlier ones, so the plain key
 * wins when set).
 */
bool __android_logger_property_get_bool(const char* key, int flag) {
  struct cache_property property = {{NULL, 0xFFFFFFFF}, {0}};
  if (flag & BOOL_DEFAULT_FLAG_PERSIST) {
    /* Buffer is sized for the longer "persist." prefix; "ro.%s" fits too. */
    char newkey[strlen("persist.") + strlen(key) + 1];
    snprintf(newkey, sizeof(newkey), "ro.%s", key);
    refresh_cache_property(&property, newkey);
    /* reset so the next refresh looks up a different property */
    property.cache.pinfo = NULL;
    property.cache.serial = 0xFFFFFFFF;
    snprintf(newkey, sizeof(newkey), "persist.%s", key);
    refresh_cache_property(&property, newkey);
    property.cache.pinfo = NULL;
    property.cache.serial = 0xFFFFFFFF;
  }

  refresh_cache_property(&property, key);

  if (check_flag(property.property, "true")) {
    return true;
  }
  if (check_flag(property.property, "false")) {
    return false;
  }
  /* any explicit non-boolean value overrides caller-supplied eng/svelte */
  if (property.property[0]) {
    flag &= ~(BOOL_DEFAULT_FLAG_ENG | BOOL_DEFAULT_FLAG_SVELTE);
  }
  if (check_flag(property.property, "eng")) {
    flag |= BOOL_DEFAULT_FLAG_ENG;
  }
  /* this is really a "not" flag */
  if (check_flag(property.property, "svelte")) {
    flag |= BOOL_DEFAULT_FLAG_SVELTE;
  }

  /* Sanity Check: eng/svelte imply a default of true before the vetoes */
  if (flag & (BOOL_DEFAULT_FLAG_SVELTE | BOOL_DEFAULT_FLAG_ENG)) {
    flag &= ~BOOL_DEFAULT_FLAG_TRUE_FALSE;
    flag |= BOOL_DEFAULT_TRUE;
  }

  /* "svelte" vetoes on low-ram devices */
  if ((flag & BOOL_DEFAULT_FLAG_SVELTE) &&
      __android_logger_property_get_bool("ro.config.low_ram", BOOL_DEFAULT_FALSE)) {
    return false;
  }
  /* "eng" vetoes on non-debuggable (user) builds */
  if ((flag & BOOL_DEFAULT_FLAG_ENG) && !__android_log_is_debuggable()) {
    return false;
  }

  return (flag & BOOL_DEFAULT_FLAG_TRUE_FALSE) != BOOL_DEFAULT_FALSE;
}
479
__android_logger_valid_buffer_size(unsigned long value)480 bool __android_logger_valid_buffer_size(unsigned long value) {
481 static long pages, pagesize;
482 unsigned long maximum;
483
484 if ((value < LOG_BUFFER_MIN_SIZE) || (LOG_BUFFER_MAX_SIZE < value)) {
485 return false;
486 }
487
488 if (!pages) {
489 pages = sysconf(_SC_PHYS_PAGES);
490 }
491 if (pages < 1) {
492 return true;
493 }
494
495 if (!pagesize) {
496 pagesize = sysconf(_SC_PAGESIZE);
497 if (pagesize <= 1) {
498 pagesize = PAGE_SIZE;
499 }
500 }
501
502 /* maximum memory impact a somewhat arbitrary ~3% */
503 pages = (pages + 31) / 32;
504 maximum = pages * pagesize;
505
506 if ((maximum < LOG_BUFFER_MIN_SIZE) || (LOG_BUFFER_MAX_SIZE < maximum)) {
507 return true;
508 }
509
510 return value <= maximum;
511 }
512
/* Same persist.*/ /* + ro.* pairing as cache2_char, but caching full value
 * strings and evaluating them to an unsigned long (buffer sizes). */
struct cache2_property_size {
  pthread_mutex_t lock;
  uint32_t serial;
  const char* key_persist;
  struct cache_property cache_persist;
  const char* key_ro;
  struct cache_property cache_ro;
  unsigned long (*const evaluate)(const struct cache2_property_size* self);
};
522
do_cache2_property_size(struct cache2_property_size * self)523 static inline unsigned long do_cache2_property_size(struct cache2_property_size* self) {
524 uint32_t current_serial;
525 int change_detected;
526 unsigned long v;
527
528 if (pthread_mutex_trylock(&self->lock)) {
529 /* We are willing to accept some race in this context */
530 return self->evaluate(self);
531 }
532
533 change_detected = check_cache(&self->cache_persist.cache) || check_cache(&self->cache_ro.cache);
534 current_serial = __system_property_area_serial();
535 if (current_serial != self->serial) {
536 change_detected = 1;
537 }
538 if (change_detected) {
539 refresh_cache_property(&self->cache_persist, self->key_persist);
540 refresh_cache_property(&self->cache_ro, self->key_ro);
541 self->serial = current_serial;
542 }
543 v = self->evaluate(self);
544
545 pthread_mutex_unlock(&self->lock);
546
547 return v;
548 }
549
property_get_size_from_cache(const struct cache_property * cache)550 static unsigned long property_get_size_from_cache(const struct cache_property* cache) {
551 char* cp;
552 unsigned long value = strtoul(cache->property, &cp, 10);
553
554 switch (*cp) {
555 case 'm':
556 case 'M':
557 value *= 1024;
558 [[fallthrough]];
559 case 'k':
560 case 'K':
561 value *= 1024;
562 [[fallthrough]];
563 case '\0':
564 break;
565
566 default:
567 value = 0;
568 }
569
570 if (!__android_logger_valid_buffer_size(value)) {
571 value = 0;
572 }
573
574 return value;
575 }
576
evaluate_property_get_size(const struct cache2_property_size * self)577 static unsigned long evaluate_property_get_size(const struct cache2_property_size* self) {
578 unsigned long size = property_get_size_from_cache(&self->cache_persist);
579 if (size) {
580 return size;
581 }
582 return property_get_size_from_cache(&self->cache_ro);
583 }
584
/*
 * Resolve the logd buffer size for `logId`.  Per-buffer properties
 * (persist.logd.size.<name> / ro.logd.size.<name>) win over the global
 * ones (persist.logd.size / ro.logd.size); absent both, the default is
 * LOG_BUFFER_MIN_SIZE on low-ram devices, else LOG_BUFFER_SIZE.
 */
unsigned long __android_logger_get_buffer_size(log_id_t logId) {
  static const char global_tunable[] = "persist.logd.size"; /* Settings App */
  static const char global_default[] = "ro.logd.size";      /* BoardConfig.mk */
  static struct cache2_property_size global = {
      /* clang-format off */
      PTHREAD_MUTEX_INITIALIZER, 0,
      global_tunable, { { NULL, 0xFFFFFFFF }, {} },
      global_default, { { NULL, 0xFFFFFFFF }, {} },
      evaluate_property_get_size
      /* clang-format on */
  };
  /* per-buffer keys, e.g. "persist.logd.size.main"; ".security" is the
   * longest buffer-name suffix appended below */
  char key_persist[strlen(global_tunable) + strlen(".security") + 1];
  char key_ro[strlen(global_default) + strlen(".security") + 1];
  /* `local` is deliberately per-call (its keys point into this frame), so
   * it caches nothing across invocations */
  struct cache2_property_size local = {
      /* clang-format off */
      PTHREAD_MUTEX_INITIALIZER, 0,
      key_persist, { { NULL, 0xFFFFFFFF }, {} },
      key_ro, { { NULL, 0xFFFFFFFF }, {} },
      evaluate_property_get_size
      /* clang-format on */
  };
  unsigned long property_size, default_size;

  default_size = do_cache2_property_size(&global);
  if (!default_size) {
    default_size = __android_logger_property_get_bool("ro.config.low_ram", BOOL_DEFAULT_FALSE)
                       ? LOG_BUFFER_MIN_SIZE /* 64K  */
                       : LOG_BUFFER_SIZE;    /* 256K */
  }

  snprintf(key_persist, sizeof(key_persist), "%s.%s", global_tunable,
           android_log_id_to_name(logId));
  snprintf(key_ro, sizeof(key_ro), "%s.%s", global_default, android_log_id_to_name(logId));
  property_size = do_cache2_property_size(&local);

  if (!property_size) {
    property_size = default_size;
  }

  if (!property_size) {
    property_size = LOG_BUFFER_SIZE;
  }

  return property_size;
}
630