/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "lowmemorykiller"

#include <errno.h>
#include <inttypes.h>
#include <pwd.h>
#include <sched.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <sys/pidfd.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <time.h>
#include <unistd.h>

#include <algorithm>
#include <array>
#include <shared_mutex>

#include <cutils/properties.h>
#include <cutils/sockets.h>
#include <liblmkd_utils.h>
#include <lmkd.h>
#include <lmkd_hooks.h>
#include <log/log.h>
#include <log/log_event_list.h>
#include <log/log_time.h>
#include <private/android_filesystem_config.h>
#include <processgroup/processgroup.h>
#include <psi/psi.h>

#include "reaper.h"
#include "statslog.h"
#include "watchdog.h"

#define BPF_FD_JUST_USE_INT
#include "BpfSyscallWrappers.h"

/*
 * Define LMKD_TRACE_KILLS to record lmkd kills in kernel traces
 * to profile and correlate with OOM kills
 */
#ifdef LMKD_TRACE_KILLS

#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include <cutils/trace.h>

static inline void trace_kill_start(const char *desc) {
    ATRACE_BEGIN(desc);
}

static inline void trace_kill_end() {
    ATRACE_END();
}

#else /* LMKD_TRACE_KILLS */

static inline void trace_kill_start(const char *) {}
static inline void trace_kill_end() {}

#endif /* LMKD_TRACE_KILLS */
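
/*
 * Usage sketch (illustrative, not part of the original source): a kill path
 * brackets the operation with these helpers, which compile to no-ops unless
 * LMKD_TRACE_KILLS is defined:
 *   trace_kill_start("kill");  // hypothetical trace marker text
 *   ... deliver SIGKILL and wait for the victim ...
 *   trace_kill_end();
 */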

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

#define ZONEINFO_PATH "/proc/zoneinfo"
#define MEMINFO_PATH "/proc/meminfo"
#define VMSTAT_PATH "/proc/vmstat"
#define PROC_STATUS_TGID_FIELD "Tgid:"
#define PROC_STATUS_RSS_FIELD "VmRSS:"
#define PROC_STATUS_SWAP_FIELD "VmSwap:"
#define LINE_MAX 128

#define PERCEPTIBLE_APP_ADJ 200

/* Android Logger event logtags (see event.logtags) */
#define KILLINFO_LOG_TAG 10195355

/* gid containing AID_SYSTEM required */
#define INKERNEL_MINFREE_PATH "/sys/module/lowmemorykiller/parameters/minfree"
#define INKERNEL_ADJ_PATH "/sys/module/lowmemorykiller/parameters/adj"

#define EIGHT_MEGA (1 << 23)

#define TARGET_UPDATE_MIN_INTERVAL_MS 1000
#define THRASHING_RESET_INTERVAL_MS 1000

#define NS_PER_MS (NS_PER_SEC / MS_PER_SEC)
#define US_PER_MS (US_PER_SEC / MS_PER_SEC)

/* Defined as ProcessList.SYSTEM_ADJ in ProcessList.java */
#define SYSTEM_ADJ (-900)

#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
#define STRINGIFY_INTERNAL(x) #x
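
/*
 * Example (illustrative): STRINGIFY expands its argument before stringizing,
 * so STRINGIFY(LINE_MAX) yields "128". This keeps scanf field widths, such as
 * the one in zoneinfo_parse() below, in sync with LINE_MAX.
 */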

/*
 * Read an lmk property with persist.device_config.lmkd_native.<name> overriding ro.lmk.<name>.
 * persist.device_config.lmkd_native.* properties are set by experiments. If a new property
 * can be controlled by an experiment then use GET_LMK_PROPERTY instead of property_get_xxx and
 * add "on property" triggers in lmkd.rc to react to the experiment flag changes.
 */
#define GET_LMK_PROPERTY(type, name, def) \
    property_get_##type("persist.device_config.lmkd_native." name, \
        property_get_##type("ro.lmk." name, def))
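
/*
 * Usage sketch (illustrative): reading the thrashing limit with the
 * experiment override taking precedence over the ro.lmk default:
 *   thrashing_limit_pct = GET_LMK_PROPERTY(int32, "thrashing_limit",
 *       low_ram_device ? DEF_THRASHING_LOWRAM : DEF_THRASHING);
 */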

/*
 * PSI monitor tracking window size.
 * PSI monitor generates events at most once per window,
 * therefore we poll memory state for the duration of
 * PSI_WINDOW_SIZE_MS after the event happens.
 */
#define PSI_WINDOW_SIZE_MS 1000
/* Polling period after PSI signal when pressure is high */
#define PSI_POLL_PERIOD_SHORT_MS 10
/* Polling period after PSI signal when pressure is low */
#define PSI_POLL_PERIOD_LONG_MS 100
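
/*
 * Worked example (illustrative): after a PSI event, polling at the short
 * period covers the 1000ms window in at most PSI_WINDOW_SIZE_MS /
 * PSI_POLL_PERIOD_SHORT_MS = 100 polls; at the long period, 10 polls.
 */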

#define FAIL_REPORT_RLIMIT_MS 1000

/*
 * System property defaults
 */
/* ro.lmk.swap_free_low_percentage property defaults */
#define DEF_LOW_SWAP 10
/* ro.lmk.thrashing_limit property defaults */
#define DEF_THRASHING_LOWRAM 30
#define DEF_THRASHING 100
/* ro.lmk.thrashing_limit_decay property defaults */
#define DEF_THRASHING_DECAY_LOWRAM 50
#define DEF_THRASHING_DECAY 10
/* ro.lmk.psi_partial_stall_ms property defaults */
#define DEF_PARTIAL_STALL_LOWRAM 200
#define DEF_PARTIAL_STALL 70
/* ro.lmk.psi_complete_stall_ms property defaults */
#define DEF_COMPLETE_STALL 700

#define LMKD_REINIT_PROP "lmkd.reinit"

#define WATCHDOG_TIMEOUT_SEC 2

/* default to old in-kernel interface if no memory pressure events */
static bool use_inkernel_interface = true;
static bool has_inkernel_module;

/* memory pressure levels */
enum vmpressure_level {
    VMPRESS_LEVEL_LOW = 0,
    VMPRESS_LEVEL_MEDIUM,
    VMPRESS_LEVEL_CRITICAL,
    VMPRESS_LEVEL_COUNT
};

static const char *level_name[] = {
    "low",
    "medium",
    "critical"
};

struct {
    int64_t min_nr_free_pages; /* recorded but not used yet */
    int64_t max_nr_free_pages;
} low_pressure_mem = { -1, -1 };

struct psi_threshold {
    enum psi_stall_type stall_type;
    int threshold_ms;
};

static int level_oomadj[VMPRESS_LEVEL_COUNT];
static int mpevfd[VMPRESS_LEVEL_COUNT] = { -1, -1, -1 };
static bool pidfd_supported;
static int last_kill_pid_or_fd = -1;
static struct timespec last_kill_tm;

/* lmkd configurable parameters */
static bool debug_process_killing;
static bool enable_pressure_upgrade;
static int64_t upgrade_pressure;
static int64_t downgrade_pressure;
static bool low_ram_device;
static bool kill_heaviest_task;
static unsigned long kill_timeout_ms;
static bool use_minfree_levels;
static bool per_app_memcg;
static int swap_free_low_percentage;
static int psi_partial_stall_ms;
static int psi_complete_stall_ms;
static int thrashing_limit_pct;
static int thrashing_limit_decay_pct;
static int thrashing_critical_pct;
static int swap_util_max;
static int64_t filecache_min_kb;
static int64_t stall_limit_critical;
static bool use_psi_monitors = false;
static int kpoll_fd;
static struct psi_threshold psi_thresholds[VMPRESS_LEVEL_COUNT] = {
    { PSI_SOME, 70 },    /* 70ms out of 1sec for partial stall */
    { PSI_SOME, 100 },   /* 100ms out of 1sec for partial stall */
    { PSI_FULL, 70 },    /* 70ms out of 1sec for complete stall */
};

static android_log_context ctx;
static Reaper reaper;
static int reaper_comm_fd[2];

enum polling_update {
    POLLING_DO_NOT_CHANGE,
    POLLING_START,
    POLLING_PAUSE,
    POLLING_RESUME,
};

/*
 * Data used for periodic polling of the device's memory state.
 * Note that when the system is not polling, poll_handler is NULL;
 * it is set when polling starts and reset back to NULL when
 * polling stops.
 */
struct polling_params {
    struct event_handler_info* poll_handler;
    struct event_handler_info* paused_handler;
    struct timespec poll_start_tm;
    struct timespec last_poll_tm;
    int polling_interval_ms;
    enum polling_update update;
};

/* data required to handle events */
struct event_handler_info {
    int data;
    void (*handler)(int data, uint32_t events, struct polling_params *poll_params);
};

/* data required to handle socket events */
struct sock_event_handler_info {
    int sock;
    pid_t pid;
    uint32_t async_event_mask;
    struct event_handler_info handler_info;
};

/* max supported number of data connections (AMS, init, tests) */
#define MAX_DATA_CONN 3

/* socket event handler data */
static struct sock_event_handler_info ctrl_sock;
static struct sock_event_handler_info data_sock[MAX_DATA_CONN];

/* vmpressure event handler data */
static struct event_handler_info vmpressure_hinfo[VMPRESS_LEVEL_COUNT];

/*
 * 1 ctrl listen socket, 3 ctrl data sockets, 3 memory pressure levels,
 * 1 lmk event fd + 1 fd to wait for process death + 1 fd to receive kill failure notifications
 */
#define MAX_EPOLL_EVENTS (1 + MAX_DATA_CONN + VMPRESS_LEVEL_COUNT + 1 + 1 + 1)
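
/*
 * Worked out (illustrative): 1 listen socket + MAX_DATA_CONN (3) data sockets
 * + VMPRESS_LEVEL_COUNT (3) pressure eventfds + 1 kernel lmk event fd
 * + 1 process-death fd + 1 kill-failure fd = 10 epoll events.
 */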
static int epollfd;
static int maxevents;

/* OOM score values used by both kernel and framework */
#define OOM_SCORE_ADJ_MIN       (-1000)
#define OOM_SCORE_ADJ_MAX       1000

static std::array<int, MAX_TARGETS> lowmem_adj;
static std::array<int, MAX_TARGETS> lowmem_minfree;
static int lowmem_targets_size;

/* Fields to parse in /proc/zoneinfo */
/* zoneinfo per-zone fields */
enum zoneinfo_zone_field {
    ZI_ZONE_NR_FREE_PAGES = 0,
    ZI_ZONE_MIN,
    ZI_ZONE_LOW,
    ZI_ZONE_HIGH,
    ZI_ZONE_PRESENT,
    ZI_ZONE_NR_FREE_CMA,
    ZI_ZONE_FIELD_COUNT
};

static const char* const zoneinfo_zone_field_names[ZI_ZONE_FIELD_COUNT] = {
    "nr_free_pages",
    "min",
    "low",
    "high",
    "present",
    "nr_free_cma",
};

/* zoneinfo per-zone special fields */
enum zoneinfo_zone_spec_field {
    ZI_ZONE_SPEC_PROTECTION = 0,
    ZI_ZONE_SPEC_PAGESETS,
    ZI_ZONE_SPEC_FIELD_COUNT,
};

static const char* const zoneinfo_zone_spec_field_names[ZI_ZONE_SPEC_FIELD_COUNT] = {
    "protection:",
    "pagesets",
};

/* see __MAX_NR_ZONES definition in kernel mmzone.h */
#define MAX_NR_ZONES 6

union zoneinfo_zone_fields {
    struct {
        int64_t nr_free_pages;
        int64_t min;
        int64_t low;
        int64_t high;
        int64_t present;
        int64_t nr_free_cma;
    } field;
    int64_t arr[ZI_ZONE_FIELD_COUNT];
};

struct zoneinfo_zone {
    union zoneinfo_zone_fields fields;
    int64_t protection[MAX_NR_ZONES];
    int64_t max_protection;
};

/* zoneinfo per-node fields */
enum zoneinfo_node_field {
    ZI_NODE_NR_INACTIVE_FILE = 0,
    ZI_NODE_NR_ACTIVE_FILE,
    ZI_NODE_FIELD_COUNT
};

static const char* const zoneinfo_node_field_names[ZI_NODE_FIELD_COUNT] = {
    "nr_inactive_file",
    "nr_active_file",
};

union zoneinfo_node_fields {
    struct {
        int64_t nr_inactive_file;
        int64_t nr_active_file;
    } field;
    int64_t arr[ZI_NODE_FIELD_COUNT];
};

struct zoneinfo_node {
    int id;
    int zone_count;
    struct zoneinfo_zone zones[MAX_NR_ZONES];
    union zoneinfo_node_fields fields;
};

/* for now two memory nodes is more than enough */
#define MAX_NR_NODES 2

struct zoneinfo {
    int node_count;
    struct zoneinfo_node nodes[MAX_NR_NODES];
    int64_t totalreserve_pages;
    int64_t total_inactive_file;
    int64_t total_active_file;
};

/* Fields to parse in /proc/meminfo */
enum meminfo_field {
    MI_NR_FREE_PAGES = 0,
    MI_CACHED,
    MI_SWAP_CACHED,
    MI_BUFFERS,
    MI_SHMEM,
    MI_UNEVICTABLE,
    MI_TOTAL_SWAP,
    MI_FREE_SWAP,
    MI_ACTIVE_ANON,
    MI_INACTIVE_ANON,
    MI_ACTIVE_FILE,
    MI_INACTIVE_FILE,
    MI_SRECLAIMABLE,
    MI_SUNRECLAIM,
    MI_KERNEL_STACK,
    MI_PAGE_TABLES,
    MI_ION_HELP,
    MI_ION_HELP_POOL,
    MI_CMA_FREE,
    MI_FIELD_COUNT
};

static const char* const meminfo_field_names[MI_FIELD_COUNT] = {
    "MemFree:",
    "Cached:",
    "SwapCached:",
    "Buffers:",
    "Shmem:",
    "Unevictable:",
    "SwapTotal:",
    "SwapFree:",
    "Active(anon):",
    "Inactive(anon):",
    "Active(file):",
    "Inactive(file):",
    "SReclaimable:",
    "SUnreclaim:",
    "KernelStack:",
    "PageTables:",
    "ION_heap:",
    "ION_heap_pool:",
    "CmaFree:",
};

union meminfo {
    struct {
        int64_t nr_free_pages;
        int64_t cached;
        int64_t swap_cached;
        int64_t buffers;
        int64_t shmem;
        int64_t unevictable;
        int64_t total_swap;
        int64_t free_swap;
        int64_t active_anon;
        int64_t inactive_anon;
        int64_t active_file;
        int64_t inactive_file;
        int64_t sreclaimable;
        int64_t sunreclaimable;
        int64_t kernel_stack;
        int64_t page_tables;
        int64_t ion_heap;
        int64_t ion_heap_pool;
        int64_t cma_free;
        /* fields below are calculated rather than read from the file */
        int64_t nr_file_pages;
        int64_t total_gpu_kb;
        int64_t easy_available;
    } field;
    int64_t arr[MI_FIELD_COUNT];
};

/* Fields to parse in /proc/vmstat */
enum vmstat_field {
    VS_FREE_PAGES,
    VS_INACTIVE_FILE,
    VS_ACTIVE_FILE,
    VS_WORKINGSET_REFAULT,
    VS_WORKINGSET_REFAULT_FILE,
    VS_PGSCAN_KSWAPD,
    VS_PGSCAN_DIRECT,
    VS_PGSCAN_DIRECT_THROTTLE,
    VS_FIELD_COUNT
};

static const char* const vmstat_field_names[VS_FIELD_COUNT] = {
    "nr_free_pages",
    "nr_inactive_file",
    "nr_active_file",
    "workingset_refault",
    "workingset_refault_file",
    "pgscan_kswapd",
    "pgscan_direct",
    "pgscan_direct_throttle",
};

union vmstat {
    struct {
        int64_t nr_free_pages;
        int64_t nr_inactive_file;
        int64_t nr_active_file;
        int64_t workingset_refault;
        int64_t workingset_refault_file;
        int64_t pgscan_kswapd;
        int64_t pgscan_direct;
        int64_t pgscan_direct_throttle;
    } field;
    int64_t arr[VS_FIELD_COUNT];
};

enum field_match_result {
    NO_MATCH,
    PARSE_FAIL,
    PARSE_SUCCESS
};

struct adjslot_list {
    struct adjslot_list *next;
    struct adjslot_list *prev;
};

struct proc {
    struct adjslot_list asl;
    int pid;
    int pidfd;
    uid_t uid;
    int oomadj;
    pid_t reg_pid; /* PID of the process that registered this record */
    bool valid;
    struct proc *pidhash_next;
};

struct reread_data {
    const char* const filename;
    int fd;
};

#define PIDHASH_SZ 1024
static struct proc *pidhash[PIDHASH_SZ];
#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

#define ADJTOSLOT(adj) ((adj) + -OOM_SCORE_ADJ_MIN)
#define ADJTOSLOT_COUNT (ADJTOSLOT(OOM_SCORE_ADJ_MAX) + 1)
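
/*
 * Worked example (illustrative): ADJTOSLOT maps an oom_score_adj in
 * [-1000, 1000] onto slot indices [0, 2000], so ADJTOSLOT(-1000) == 0,
 * ADJTOSLOT(0) == 1000, and ADJTOSLOT_COUNT == 2001.
 */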

// protects procadjslot_list from concurrent access
static std::shared_mutex adjslot_list_lock;
// procadjslot_list should be modified only from the main thread while exclusively holding
// adjslot_list_lock. Readers from non-main threads should hold the adjslot_list_lock shared lock.
static struct adjslot_list procadjslot_list[ADJTOSLOT_COUNT];

#define MAX_DISTINCT_OOM_ADJ 32
#define KILLCNT_INVALID_IDX 0xFF
/*
 * Because the killcnt array is sparse, a two-level indirection is used
 * to keep its size small. killcnt_idx stores the index of the element in
 * the killcnt array. Index KILLCNT_INVALID_IDX indicates an unused slot.
 */
static uint8_t killcnt_idx[ADJTOSLOT_COUNT];
static uint16_t killcnt[MAX_DISTINCT_OOM_ADJ];
static int killcnt_free_idx = 0;
static uint32_t killcnt_total = 0;
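
/*
 * Lookup sketch (illustrative): the kill count for a given oomadj is reached
 * through the indirection as
 *   uint8_t idx = killcnt_idx[ADJTOSLOT(oomadj)];
 *   uint16_t count = (idx == KILLCNT_INVALID_IDX) ? 0 : killcnt[idx];
 * so at most MAX_DISTINCT_OOM_ADJ distinct levels ever occupy killcnt slots.
 */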

/* PAGE_SIZE / 1024 */
static long page_k;

static bool update_props();
static bool init_monitors();
static void destroy_monitors();

static int clamp(int low, int high, int value) {
    return std::max(std::min(value, high), low);
}

static bool parse_int64(const char* str, int64_t* ret) {
    char* endptr;
    long long val = strtoll(str, &endptr, 10);
    if (str == endptr || val > INT64_MAX) {
        return false;
    }
    *ret = (int64_t)val;
    return true;
}

static int find_field(const char* name, const char* const field_names[], int field_count) {
    for (int i = 0; i < field_count; i++) {
        if (!strcmp(name, field_names[i])) {
            return i;
        }
    }
    return -1;
}

static enum field_match_result match_field(const char* cp, const char* ap,
                                   const char* const field_names[],
                                   int field_count, int64_t* field,
                                   int *field_idx) {
    int i = find_field(cp, field_names, field_count);
    if (i < 0) {
        return NO_MATCH;
    }
    *field_idx = i;
    return parse_int64(ap, field) ? PARSE_SUCCESS : PARSE_FAIL;
}

/*
 * Read file content from the beginning up to max_len bytes or EOF
 * whichever happens first.
 */
static ssize_t read_all(int fd, char *buf, size_t max_len)
{
    ssize_t ret = 0;
    off_t offset = 0;

    while (max_len > 0) {
        ssize_t r = TEMP_FAILURE_RETRY(pread(fd, buf, max_len, offset));
        if (r == 0) {
            break;
        }
        if (r == -1) {
            return -1;
        }
        ret += r;
        buf += r;
        offset += r;
        max_len -= r;
    }

    return ret;
}

/*
 * Read a new or already opened file from the beginning.
 * If the file has not been opened yet data->fd should be set to -1.
 * To be used with files which are read often and possibly during high
 * memory pressure to minimize file opening which by itself requires kernel
 * memory allocation and might result in a stall on memory stressed system.
 */
static char *reread_file(struct reread_data *data) {
    /* start with page-size buffer and increase if needed */
    static ssize_t buf_size = PAGE_SIZE;
    static char *new_buf, *buf = NULL;
    ssize_t size;

    if (data->fd == -1) {
        /* First-time buffer initialization */
        if (!buf && (buf = static_cast<char*>(malloc(buf_size))) == nullptr) {
            return NULL;
        }

        data->fd = TEMP_FAILURE_RETRY(open(data->filename, O_RDONLY | O_CLOEXEC));
        if (data->fd < 0) {
            ALOGE("%s open: %s", data->filename, strerror(errno));
            return NULL;
        }
    }

    while (true) {
        size = read_all(data->fd, buf, buf_size - 1);
        if (size < 0) {
            ALOGE("%s read: %s", data->filename, strerror(errno));
            close(data->fd);
            data->fd = -1;
            return NULL;
        }
        if (size < buf_size - 1) {
            break;
        }
        /*
         * Since we are reading /proc files we can't use fstat to find out
         * the real size of the file. Double the buffer size and keep retrying.
         */
        if ((new_buf = static_cast<char*>(realloc(buf, buf_size * 2))) == nullptr) {
            errno = ENOMEM;
            return NULL;
        }
        buf = new_buf;
        buf_size *= 2;
    }
    buf[size] = 0;

    return buf;
}
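
/*
 * Usage sketch (illustrative): callers keep a static descriptor so the fd
 * stays open across reads, e.g.
 *   static struct reread_data file_data = { .filename = MEMINFO_PATH, .fd = -1 };
 *   char *buf = reread_file(&file_data);  // NULL on failure
 */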

static bool claim_record(struct proc* procp, pid_t pid) {
    if (procp->reg_pid == pid) {
        /* Record already belongs to the registrant */
        return true;
    }
    if (procp->reg_pid == 0) {
        /* Old registrant is gone, claim the record */
        procp->reg_pid = pid;
        return true;
    }
    /* The record is owned by another registrant */
    return false;
}

static void remove_claims(pid_t pid) {
    int i;

    for (i = 0; i < PIDHASH_SZ; i++) {
        struct proc* procp = pidhash[i];
        while (procp) {
            if (procp->reg_pid == pid) {
                procp->reg_pid = 0;
            }
            procp = procp->pidhash_next;
        }
    }
}

static void ctrl_data_close(int dsock_idx) {
    struct epoll_event epev;

    ALOGI("closing lmkd data connection");
    if (epoll_ctl(epollfd, EPOLL_CTL_DEL, data_sock[dsock_idx].sock, &epev) == -1) {
        // Log a warning and keep going
        ALOGW("epoll_ctl for data connection socket failed; errno=%d", errno);
    }
    maxevents--;

    close(data_sock[dsock_idx].sock);
    data_sock[dsock_idx].sock = -1;

    /* Mark all records of the old registrant as unclaimed */
    remove_claims(data_sock[dsock_idx].pid);
}

static ssize_t ctrl_data_read(int dsock_idx, char* buf, size_t bufsz, struct ucred* sender_cred) {
    struct iovec iov = {buf, bufsz};
    char control[CMSG_SPACE(sizeof(struct ucred))];
    struct msghdr hdr = {
            NULL, 0, &iov, 1, control, sizeof(control), 0,
    };
    ssize_t ret;
    ret = TEMP_FAILURE_RETRY(recvmsg(data_sock[dsock_idx].sock, &hdr, 0));
    if (ret == -1) {
        ALOGE("control data socket read failed; %s", strerror(errno));
        return -1;
    }
    if (ret == 0) {
        ALOGE("Got EOF on control data socket");
        return -1;
    }

    struct ucred* cred = NULL;
    struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr);
    while (cmsg != NULL) {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_CREDENTIALS) {
            cred = (struct ucred*)CMSG_DATA(cmsg);
            break;
        }
        cmsg = CMSG_NXTHDR(&hdr, cmsg);
    }

    if (cred == NULL) {
        ALOGE("Failed to retrieve sender credentials");
        /* Close the connection */
        ctrl_data_close(dsock_idx);
        return -1;
    }

    memcpy(sender_cred, cred, sizeof(struct ucred));

    /* Store PID of the peer */
    data_sock[dsock_idx].pid = cred->pid;

    return ret;
}

static int ctrl_data_write(int dsock_idx, char* buf, size_t bufsz) {
    int ret = 0;

    ret = TEMP_FAILURE_RETRY(write(data_sock[dsock_idx].sock, buf, bufsz));

    if (ret == -1) {
        ALOGE("control data socket write failed; errno=%d", errno);
    } else if (ret == 0) {
        ALOGE("Got EOF on control data socket");
        ret = -1;
    }

    return ret;
}

/*
 * Write the pid/uid pair over the data socket, note: all active clients
 * will receive this unsolicited notification.
 */
static void ctrl_data_write_lmk_kill_occurred(pid_t pid, uid_t uid) {
    LMKD_CTRL_PACKET packet;
    size_t len = lmkd_pack_set_prockills(packet, pid, uid);

    for (int i = 0; i < MAX_DATA_CONN; i++) {
        if (data_sock[i].sock >= 0 && data_sock[i].async_event_mask & 1 << LMK_ASYNC_EVENT_KILL) {
            ctrl_data_write(i, (char*)packet, len);
        }
    }
}

/*
 * Write the kill_stat/memory_stat over the data socket to be propagated via AMS to statsd
 */
static void stats_write_lmk_kill_occurred(struct kill_stat *kill_st,
                                          struct memory_stat *mem_st) {
    LMK_KILL_OCCURRED_PACKET packet;
    const size_t len = lmkd_pack_set_kill_occurred(packet, kill_st, mem_st);
    if (len == 0) {
        return;
    }

    for (int i = 0; i < MAX_DATA_CONN; i++) {
        if (data_sock[i].sock >= 0 && data_sock[i].async_event_mask & 1 << LMK_ASYNC_EVENT_STAT) {
            ctrl_data_write(i, packet, len);
        }
    }
}

static void stats_write_lmk_kill_occurred_pid(int pid, struct kill_stat *kill_st,
                                              struct memory_stat *mem_st) {
    kill_st->taskname = stats_get_task_name(pid);
    if (kill_st->taskname != NULL) {
        stats_write_lmk_kill_occurred(kill_st, mem_st);
    }
}

/*
 * Write the state_changed over the data socket to be propagated via AMS to statsd
 */
static void stats_write_lmk_state_changed(enum lmk_state state) {
    LMKD_CTRL_PACKET packet_state_changed;
    const size_t len = lmkd_pack_set_state_changed(packet_state_changed, state);
    if (len == 0) {
        return;
    }
    for (int i = 0; i < MAX_DATA_CONN; i++) {
        if (data_sock[i].sock >= 0 && data_sock[i].async_event_mask & 1 << LMK_ASYNC_EVENT_STAT) {
            ctrl_data_write(i, (char*)packet_state_changed, len);
        }
    }
}

static void poll_kernel(int poll_fd) {
    if (poll_fd == -1) {
        // not waiting
        return;
    }

    while (1) {
        char rd_buf[256];
        int bytes_read = TEMP_FAILURE_RETRY(pread(poll_fd, (void*)rd_buf, sizeof(rd_buf) - 1, 0));
        if (bytes_read <= 0) break;
        rd_buf[bytes_read] = '\0';

        int64_t pid;
        int64_t uid;
        int64_t group_leader_pid;
        int64_t rss_in_pages;
        struct memory_stat mem_st = {};
        int16_t oom_score_adj;
        int16_t min_score_adj;
        int64_t starttime;
        char* taskname = 0;

        int fields_read =
                sscanf(rd_buf,
                       "%" SCNd64 " %" SCNd64 " %" SCNd64 " %" SCNd64 " %" SCNd64 " %" SCNd64
                       " %" SCNd16 " %" SCNd16 " %" SCNd64 "\n%m[^\n]",
                       &pid, &uid, &group_leader_pid, &mem_st.pgfault, &mem_st.pgmajfault,
                       &rss_in_pages, &oom_score_adj, &min_score_adj, &starttime, &taskname);

        /* only the death of the group leader process is logged */
        if (fields_read == 10 && group_leader_pid == pid) {
            ctrl_data_write_lmk_kill_occurred((pid_t)pid, (uid_t)uid);
            mem_st.process_start_time_ns = starttime * (NS_PER_SEC / sysconf(_SC_CLK_TCK));
            mem_st.rss_in_bytes = rss_in_pages * PAGE_SIZE;

            struct kill_stat kill_st = {
                .uid = static_cast<int32_t>(uid),
                .kill_reason = NONE,
                .oom_score = oom_score_adj,
                .min_oom_score = min_score_adj,
                .free_mem_kb = 0,
                .free_swap_kb = 0,
            };
            stats_write_lmk_kill_occurred_pid(pid, &kill_st, &mem_st);
        }

        free(taskname);
    }
}

static bool init_poll_kernel() {
    kpoll_fd = TEMP_FAILURE_RETRY(open("/proc/lowmemorykiller", O_RDONLY | O_NONBLOCK | O_CLOEXEC));

    if (kpoll_fd < 0) {
        ALOGE("kernel lmk event file could not be opened; errno=%d", errno);
        return false;
    }

    return true;
}

static struct proc *pid_lookup(int pid) {
    struct proc *procp;

    for (procp = pidhash[pid_hashfn(pid)]; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            ;

    return procp;
}

static void adjslot_insert(struct adjslot_list *head, struct adjslot_list *new_element)
{
    struct adjslot_list *next = head->next;
    new_element->prev = head;
    new_element->next = next;
    next->prev = new_element;
    head->next = new_element;
}

static void adjslot_remove(struct adjslot_list *old)
{
    struct adjslot_list *prev = old->prev;
    struct adjslot_list *next = old->next;
    next->prev = prev;
    prev->next = next;
}

static struct adjslot_list *adjslot_tail(struct adjslot_list *head) {
    struct adjslot_list *asl = head->prev;

    return asl == head ? NULL : asl;
}

// Should be modified only from the main thread.
static void proc_slot(struct proc *procp) {
    int adjslot = ADJTOSLOT(procp->oomadj);
    std::scoped_lock lock(adjslot_list_lock);

    adjslot_insert(&procadjslot_list[adjslot], &procp->asl);
}

// Should be modified only from the main thread.
static void proc_unslot(struct proc *procp) {
    std::scoped_lock lock(adjslot_list_lock);

    adjslot_remove(&procp->asl);
}

static void proc_insert(struct proc *procp) {
    int hval = pid_hashfn(procp->pid);

    procp->pidhash_next = pidhash[hval];
    pidhash[hval] = procp;
    proc_slot(procp);
}

// Can be called only from the main thread.
static int pid_remove(int pid) {
    int hval = pid_hashfn(pid);
    struct proc *procp;
    struct proc *prevp;

    for (procp = pidhash[hval], prevp = NULL; procp && procp->pid != pid;
         procp = procp->pidhash_next)
            prevp = procp;

    if (!procp)
        return -1;

    if (!prevp)
        pidhash[hval] = procp->pidhash_next;
    else
        prevp->pidhash_next = procp->pidhash_next;

    proc_unslot(procp);
    /*
     * Close the pidfd here if we are not waiting for the corresponding process to die,
     * in which case stop_wait_for_proc_kill() will close the pidfd later
     */
    if (procp->pidfd >= 0 && procp->pidfd != last_kill_pid_or_fd) {
        close(procp->pidfd);
    }
    free(procp);
    return 0;
}

static void pid_invalidate(int pid) {
    std::shared_lock lock(adjslot_list_lock);
    struct proc *procp = pid_lookup(pid);

    if (procp) {
        procp->valid = false;
    }
}

/*
 * Write a string to a file.
 * Returns false if the file does not exist.
 */
static bool writefilestring(const char *path, const char *s,
                            bool err_if_missing) {
    int fd = open(path, O_WRONLY | O_CLOEXEC);
    ssize_t len = strlen(s);
    ssize_t ret;

    if (fd < 0) {
        if (err_if_missing) {
            ALOGE("Error opening %s; errno=%d", path, errno);
        }
        return false;
    }

    ret = TEMP_FAILURE_RETRY(write(fd, s, len));
    if (ret < 0) {
        ALOGE("Error writing %s; errno=%d", path, errno);
    } else if (ret < len) {
        ALOGE("Short write on %s; length=%zd", path, ret);
    }

    close(fd);
    return true;
}

static inline long get_time_diff_ms(struct timespec *from,
                                    struct timespec *to) {
    return (to->tv_sec - from->tv_sec) * (long)MS_PER_SEC +
           (to->tv_nsec - from->tv_nsec) / (long)NS_PER_MS;
}

/* Reads /proc/pid/status into buf. */
static bool read_proc_status(int pid, char *buf, size_t buf_sz) {
    char path[PATH_MAX];
    int fd;
    ssize_t size;

    snprintf(path, PATH_MAX, "/proc/%d/status", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd < 0) {
        return false;
    }

    size = read_all(fd, buf, buf_sz - 1);
    close(fd);
    if (size < 0) {
        return false;
    }
    buf[size] = 0;
    return true;
}

/* Looks for tag in buf and parses the first integer */
static bool parse_status_tag(char *buf, const char *tag, int64_t *out) {
    char *pos = buf;
    while (true) {
        pos = strstr(pos, tag);
        /* Stop if tag not found or found at the line beginning */
        if (pos == NULL || pos == buf || pos[-1] == '\n') {
            break;
        }
        pos++;
    }

    if (pos == NULL) {
        return false;
    }

    pos += strlen(tag);
    while (*pos == ' ') ++pos;
    return parse_int64(pos, out);
}
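
/*
 * Example (illustrative): given a /proc/<pid>/status buffer containing a line
 * such as "VmRSS:     1024 kB", parse_status_tag(buf, PROC_STATUS_RSS_FIELD,
 * &rss) stores 1024 in rss; strtoll() inside parse_int64() skips the leading
 * whitespace and stops at the trailing "kB".
 */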

static int proc_get_size(int pid) {
    char path[PATH_MAX];
    char line[LINE_MAX];
    int fd;
    int rss = 0;
    int total;
    ssize_t ret;

    /* gid containing AID_READPROC required */
    snprintf(path, PATH_MAX, "/proc/%d/statm", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1)
        return -1;

    ret = read_all(fd, line, sizeof(line) - 1);
    if (ret < 0) {
        close(fd);
        return -1;
    }
    line[ret] = '\0';

    sscanf(line, "%d %d ", &total, &rss);
    close(fd);
    return rss;
}

static char *proc_get_name(int pid, char *buf, size_t buf_size) {
    char path[PATH_MAX];
    int fd;
    char *cp;
    ssize_t ret;

    /* gid containing AID_READPROC required */
    snprintf(path, PATH_MAX, "/proc/%d/cmdline", pid);
    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd == -1) {
        return NULL;
    }
    ret = read_all(fd, buf, buf_size - 1);
    close(fd);
    if (ret < 0) {
        return NULL;
    }
    buf[ret] = '\0';

    cp = strchr(buf, ' ');
    if (cp) {
        *cp = '\0';
    }

    return buf;
}

static void cmd_procprio(LMKD_CTRL_PACKET packet, int field_count, struct ucred *cred) {
    struct proc *procp;
    char path[LINE_MAX];
    char val[20];
    int soft_limit_mult;
    struct lmk_procprio params;
    bool is_system_server;
    struct passwd *pwdrec;
    int64_t tgid;
    char buf[PAGE_SIZE];

    lmkd_pack_get_procprio(packet, field_count, &params);

    if (params.oomadj < OOM_SCORE_ADJ_MIN ||
        params.oomadj > OOM_SCORE_ADJ_MAX) {
        ALOGE("Invalid PROCPRIO oomadj argument %d", params.oomadj);
        return;
    }

    if (params.ptype < PROC_TYPE_FIRST || params.ptype >= PROC_TYPE_COUNT) {
        ALOGE("Invalid PROCPRIO process type argument %d", params.ptype);
        return;
    }

    /* Check if registered process is a thread group leader */
    if (read_proc_status(params.pid, buf, sizeof(buf))) {
        if (parse_status_tag(buf, PROC_STATUS_TGID_FIELD, &tgid) && tgid != params.pid) {
            ALOGE("Attempt to register a task that is not a thread group leader "
                  "(tid %d, tgid %" PRId64 ")", params.pid, tgid);
            return;
        }
    }

    /* gid containing AID_READPROC required */
    /* CAP_SYS_RESOURCE required */
    /* CAP_DAC_OVERRIDE required */
    snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", params.pid);
    snprintf(val, sizeof(val), "%d", params.oomadj);
    if (!writefilestring(path, val, false)) {
        ALOGW("Failed to open %s; errno=%d: process %d might have been killed",
              path, errno, params.pid);
        /* If this file does not exist the process is dead. */
        return;
    }

    if (use_inkernel_interface) {
        stats_store_taskname(params.pid, proc_get_name(params.pid, path, sizeof(path)));
        return;
    }

    /* lmkd should not change soft limits for services */
    if (params.ptype == PROC_TYPE_APP && per_app_memcg) {
        if (params.oomadj >= 900) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 800) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 700) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 600) {
            // Launcher should be perceptible, don't kill it.
            params.oomadj = 200;
            soft_limit_mult = 1;
        } else if (params.oomadj >= 500) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 400) {
            soft_limit_mult = 0;
        } else if (params.oomadj >= 300) {
            soft_limit_mult = 1;
        } else if (params.oomadj >= 200) {
            soft_limit_mult = 8;
        } else if (params.oomadj >= 100) {
            soft_limit_mult = 10;
        } else if (params.oomadj >=   0) {
            soft_limit_mult = 20;
        } else {
            // Persistent processes will have a large
            // soft limit of 512MB.
            soft_limit_mult = 64;
        }

        std::string path;
        if (!CgroupGetAttributePathForTask("MemSoftLimit", params.pid, &path)) {
            ALOGE("Querying MemSoftLimit path failed");
            return;
        }

        snprintf(val, sizeof(val), "%d", soft_limit_mult * EIGHT_MEGA);

        /*
         * system_server process has no memcg under /dev/memcg/apps but should be
         * registered with lmkd. This is the best way so far to identify it.
         */
        is_system_server = (params.oomadj == SYSTEM_ADJ &&
                            (pwdrec = getpwnam("system")) != NULL &&
                            params.uid == pwdrec->pw_uid);
        writefilestring(path.c_str(), val, !is_system_server);
    }

    procp = pid_lookup(params.pid);
    if (!procp) {
        int pidfd = -1;

        if (pidfd_supported) {
            pidfd = TEMP_FAILURE_RETRY(pidfd_open(params.pid, 0));
            if (pidfd < 0) {
                ALOGE("pidfd_open for pid %d failed; errno=%d", params.pid, errno);
                return;
            }
        }

        procp = static_cast<struct proc*>(calloc(1, sizeof(struct proc)));
        if (!procp) {
            // Oh, the irony.  May need to rebuild our state.
            return;
        }

        procp->pid = params.pid;
        procp->pidfd = pidfd;
        procp->uid = params.uid;
        procp->reg_pid = cred->pid;
        procp->oomadj = params.oomadj;
        procp->valid = true;
        proc_insert(procp);
    } else {
        if (!claim_record(procp, cred->pid)) {
            char buf[LINE_MAX];
            char *taskname = proc_get_name(cred->pid, buf, sizeof(buf));
            /* Only the registrant of the record can modify it */
            ALOGE("%s (%d, %d) attempts to modify a process registered by another client",
                taskname ? taskname : "A process ", cred->uid, cred->pid);
            return;
        }
        proc_unslot(procp);
        procp->oomadj = params.oomadj;
        proc_slot(procp);
    }
}

static void cmd_procremove(LMKD_CTRL_PACKET packet, struct ucred *cred) {
    struct lmk_procremove params;
    struct proc *procp;

    lmkd_pack_get_procremove(packet, &params);

    if (use_inkernel_interface) {
        /*
         * Perform an extra check before the pid is removed, after which it
         * will be impossible for poll_kernel to get the taskname. poll_kernel()
         * is potentially a long-running blocking function; however this method
         * handles AMS requests but does not block AMS.
         */
        poll_kernel(kpoll_fd);

        stats_remove_taskname(params.pid);
        return;
    }

    procp = pid_lookup(params.pid);
    if (!procp) {
        return;
    }

    if (!claim_record(procp, cred->pid)) {
        char buf[LINE_MAX];
        char *taskname = proc_get_name(cred->pid, buf, sizeof(buf));
        /* Only the registrant of the record can remove it */
        ALOGE("%s (%d, %d) attempts to unregister a process registered by another client",
            taskname ? taskname : "A process ", cred->uid, cred->pid);
        return;
    }

    /*
     * WARNING: After pid_remove() procp is freed and can't be used!
     * Therefore placed at the end of the function.
     */
    pid_remove(params.pid);
}

static void cmd_procpurge(struct ucred *cred) {
    int i;
    struct proc *procp;
    struct proc *next;

    if (use_inkernel_interface) {
        stats_purge_tasknames();
        return;
    }

    for (i = 0; i < PIDHASH_SZ; i++) {
        procp = pidhash[i];
        while (procp) {
            next = procp->pidhash_next;
            /* Purge only records created by the requestor */
            if (claim_record(procp, cred->pid)) {
                pid_remove(procp->pid);
            }
            procp = next;
        }
    }
}

static void cmd_subscribe(int dsock_idx, LMKD_CTRL_PACKET packet) {
    struct lmk_subscribe params;

    lmkd_pack_get_subscribe(packet, &params);
    data_sock[dsock_idx].async_event_mask |= 1 << params.evt_type;
}

static void inc_killcnt(int oomadj) {
    int slot = ADJTOSLOT(oomadj);
    uint8_t idx = killcnt_idx[slot];

    if (idx == KILLCNT_INVALID_IDX) {
        /* index is not assigned for this oomadj */
        if (killcnt_free_idx < MAX_DISTINCT_OOM_ADJ) {
            killcnt_idx[slot] = killcnt_free_idx;
            killcnt[killcnt_free_idx] = 1;
            killcnt_free_idx++;
        } else {
            ALOGW("Number of distinct oomadj levels exceeds %d",
                MAX_DISTINCT_OOM_ADJ);
        }
    } else {
        /*
         * wraparound is highly unlikely and is detectable using the total
         * counter because it has to be equal to the sum of all counters
         */
        killcnt[idx]++;
    }
    /* increment total kill counter */
    killcnt_total++;
}

static int get_killcnt(int min_oomadj, int max_oomadj) {
    int slot;
    int count = 0;

    if (min_oomadj > max_oomadj)
        return 0;

    /* special case to get total kill count */
    if (min_oomadj > OOM_SCORE_ADJ_MAX)
        return killcnt_total;

    while (min_oomadj <= max_oomadj &&
           (slot = ADJTOSLOT(min_oomadj)) < ADJTOSLOT_COUNT) {
        uint8_t idx = killcnt_idx[slot];
        if (idx != KILLCNT_INVALID_IDX) {
            count += killcnt[idx];
        }
        min_oomadj++;
    }

    return count;
}

static int cmd_getkillcnt(LMKD_CTRL_PACKET packet) {
    struct lmk_getkillcnt params;

    if (use_inkernel_interface) {
        /* kernel driver does not expose this information */
        return 0;
    }

    lmkd_pack_get_getkillcnt(packet, &params);

    return get_killcnt(params.min_oomadj, params.max_oomadj);
}

static void cmd_target(int ntargets, LMKD_CTRL_PACKET packet) {
    int i;
    struct lmk_target target;
    char minfree_str[PROPERTY_VALUE_MAX];
    char *pstr = minfree_str;
    char *pend = minfree_str + sizeof(minfree_str);
    static struct timespec last_req_tm;
    struct timespec curr_tm;

    if (ntargets < 1 || ntargets > (int)lowmem_adj.size()) {
        return;
    }

    /*
     * Ratelimit minfree updates to once per TARGET_UPDATE_MIN_INTERVAL_MS
     * to prevent DoS attacks
     */
    if (clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm) != 0) {
        ALOGE("Failed to get current time");
        return;
    }

    if (get_time_diff_ms(&last_req_tm, &curr_tm) <
        TARGET_UPDATE_MIN_INTERVAL_MS) {
        ALOGE("Ignoring frequent updates to lmkd limits");
        return;
    }

    last_req_tm = curr_tm;

    for (i = 0; i < ntargets; i++) {
        lmkd_pack_get_target(packet, i, &target);
        lowmem_minfree[i] = target.minfree;
        lowmem_adj[i] = target.oom_adj_score;

        pstr += snprintf(pstr, pend - pstr, "%d:%d,", target.minfree,
            target.oom_adj_score);
        if (pstr >= pend) {
            /* if no more space in the buffer then terminate the loop */
            pstr = pend;
            break;
        }
    }

    lowmem_targets_size = ntargets;

    /* Overwrite the last extra comma */
    pstr[-1] = '\0';
    property_set("sys.lmk.minfree_levels", minfree_str);

    if (has_inkernel_module) {
        char minfreestr[128];
        char killpriostr[128];

        minfreestr[0] = '\0';
        killpriostr[0] = '\0';

        for (i = 0; i < lowmem_targets_size; i++) {
            char val[40];

            if (i) {
                strlcat(minfreestr, ",", sizeof(minfreestr));
                strlcat(killpriostr, ",", sizeof(killpriostr));
            }

            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_minfree[i] : 0);
            strlcat(minfreestr, val, sizeof(minfreestr));
            snprintf(val, sizeof(val), "%d", use_inkernel_interface ? lowmem_adj[i] : 0);
            strlcat(killpriostr, val, sizeof(killpriostr));
        }

        writefilestring(INKERNEL_MINFREE_PATH, minfreestr, true);
        writefilestring(INKERNEL_ADJ_PATH, killpriostr, true);
    }
}

static void ctrl_command_handler(int dsock_idx) {
    LMKD_CTRL_PACKET packet;
    struct ucred cred;
    int len;
    enum lmk_cmd cmd;
    int nargs;
    int targets;
    int kill_cnt;
    int result;

    len = ctrl_data_read(dsock_idx, (char *)packet, CTRL_PACKET_MAX_SIZE, &cred);
    if (len <= 0)
        return;

    if (len < (int)sizeof(int)) {
        ALOGE("Wrong control socket read length len=%d", len);
        return;
    }

    cmd = lmkd_pack_get_cmd(packet);
    nargs = len / sizeof(int) - 1;
    if (nargs < 0)
        goto wronglen;

    switch(cmd) {
    case LMK_TARGET:
        targets = nargs / 2;
        if (nargs & 0x1 || targets > (int)lowmem_adj.size()) {
            goto wronglen;
        }
        cmd_target(targets, packet);
        break;
    case LMK_PROCPRIO:
        /* process type field is optional for backward compatibility */
        if (nargs < 3 || nargs > 4)
            goto wronglen;
        cmd_procprio(packet, nargs, &cred);
        break;
    case LMK_PROCREMOVE:
        if (nargs != 1)
            goto wronglen;
        cmd_procremove(packet, &cred);
        break;
    case LMK_PROCPURGE:
        if (nargs != 0)
            goto wronglen;
        cmd_procpurge(&cred);
        break;
    case LMK_GETKILLCNT:
        if (nargs != 2)
            goto wronglen;
        kill_cnt = cmd_getkillcnt(packet);
        len = lmkd_pack_set_getkillcnt_repl(packet, kill_cnt);
        if (ctrl_data_write(dsock_idx, (char *)packet, len) != len)
            return;
        break;
    case LMK_SUBSCRIBE:
        if (nargs != 1)
            goto wronglen;
        cmd_subscribe(dsock_idx, packet);
        break;
    case LMK_PROCKILL:
        /* This command code is NOT expected at all */
        ALOGE("Received unexpected command code %d", cmd);
        break;
    case LMK_UPDATE_PROPS:
        if (nargs != 0)
            goto wronglen;
        result = -1;
        if (update_props()) {
            if (!use_inkernel_interface) {
                /* Reinitialize monitors to apply new settings */
                destroy_monitors();
                if (init_monitors()) {
                    result = 0;
                }
            } else {
                result = 0;
            }
        }

        len = lmkd_pack_set_update_props_repl(packet, result);
        if (ctrl_data_write(dsock_idx, (char *)packet, len) != len) {
            ALOGE("Failed to report operation results");
        }
        if (!result) {
            ALOGI("Properties reinitialized");
        } else {
            /* New settings can't be supported, crash to be restarted */
            ALOGE("New configuration is not supported. Exiting...");
            exit(1);
        }
        break;
    default:
        ALOGE("Received unknown command code %d", cmd);
        return;
    }

    return;

wronglen:
    ALOGE("Wrong control socket read length cmd=%d len=%d", cmd, len);
}

static void ctrl_data_handler(int data, uint32_t events,
                              struct polling_params *poll_params __unused) {
    if (events & EPOLLIN) {
        ctrl_command_handler(data);
    }
}

static int get_free_dsock() {
    for (int i = 0; i < MAX_DATA_CONN; i++) {
        if (data_sock[i].sock < 0) {
            return i;
        }
    }
    return -1;
}

static void ctrl_connect_handler(int data __unused, uint32_t events __unused,
                                 struct polling_params *poll_params __unused) {
    struct epoll_event epev;
    int free_dscock_idx = get_free_dsock();

    if (free_dscock_idx < 0) {
        /*
         * Number of data connections exceeded the max supported. This should not
         * happen, but if it does we drop all existing connections and accept
         * the new one. This prevents inactive connections from monopolizing the
         * data socket, and if we drop the ActivityManager connection it will
         * immediately reconnect.
         */
        for (int i = 0; i < MAX_DATA_CONN; i++) {
            ctrl_data_close(i);
        }
        free_dscock_idx = 0;
    }

    data_sock[free_dscock_idx].sock = accept(ctrl_sock.sock, NULL, NULL);
    if (data_sock[free_dscock_idx].sock < 0) {
        ALOGE("lmkd control socket accept failed; errno=%d", errno);
        return;
    }

    ALOGI("lmkd data connection established");
    /* use data to store data connection idx */
    data_sock[free_dscock_idx].handler_info.data = free_dscock_idx;
    data_sock[free_dscock_idx].handler_info.handler = ctrl_data_handler;
    data_sock[free_dscock_idx].async_event_mask = 0;
    epev.events = EPOLLIN;
    epev.data.ptr = (void *)&(data_sock[free_dscock_idx].handler_info);
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, data_sock[free_dscock_idx].sock, &epev) == -1) {
        ALOGE("epoll_ctl for data connection socket failed; errno=%d", errno);
        ctrl_data_close(free_dscock_idx);
        return;
    }
    maxevents++;
}

/*
 * /proc/zoneinfo parsing routines
 * Expected file format is:
 *
 *   Node <node_id>, zone   <zone_name>
 *   (
 *    per-node stats
 *       (<per-node field name> <value>)+
 *   )?
 *   (pages free     <value>
 *       (<per-zone field name> <value>)+
 *    pagesets
 *       (<unused fields>)*
 *   )+
 *   ...
 */
zoneinfo_parse_protection(char * buf,struct zoneinfo_zone * zone)1635 static void zoneinfo_parse_protection(char *buf, struct zoneinfo_zone *zone) {
1636     int zone_idx;
1637     int64_t max = 0;
1638     char *save_ptr;
1639 
1640     for (buf = strtok_r(buf, "(), ", &save_ptr), zone_idx = 0;
1641          buf && zone_idx < MAX_NR_ZONES;
1642          buf = strtok_r(NULL, "), ", &save_ptr), zone_idx++) {
1643         long long zoneval = strtoll(buf, &buf, 0);
1644         if (zoneval > max) {
1645             max = (zoneval > INT64_MAX) ? INT64_MAX : zoneval;
1646         }
1647         zone->protection[zone_idx] = zoneval;
1648     }
1649     zone->max_protection = max;
1650 }
1651 
zoneinfo_parse_zone(char ** buf,struct zoneinfo_zone * zone)1652 static int zoneinfo_parse_zone(char **buf, struct zoneinfo_zone *zone) {
1653     for (char *line = strtok_r(NULL, "\n", buf); line;
1654          line = strtok_r(NULL, "\n", buf)) {
1655         char *cp;
1656         char *ap;
1657         char *save_ptr;
1658         int64_t val;
1659         int field_idx;
1660         enum field_match_result match_res;
1661 
1662         cp = strtok_r(line, " ", &save_ptr);
1663         if (!cp) {
1664             return false;
1665         }
1666 
1667         field_idx = find_field(cp, zoneinfo_zone_spec_field_names, ZI_ZONE_SPEC_FIELD_COUNT);
1668         if (field_idx >= 0) {
1669             /* special field */
1670             if (field_idx == ZI_ZONE_SPEC_PAGESETS) {
1671                 /* no more fields we are interested in */
1672                 return true;
1673             }
1674 
1675             /* protection field */
1676             ap = strtok_r(NULL, ")", &save_ptr);
1677             if (ap) {
1678                 zoneinfo_parse_protection(ap, zone);
1679             }
1680             continue;
1681         }
1682 
1683         ap = strtok_r(NULL, " ", &save_ptr);
1684         if (!ap) {
1685             continue;
1686         }
1687 
1688         match_res = match_field(cp, ap, zoneinfo_zone_field_names, ZI_ZONE_FIELD_COUNT,
1689             &val, &field_idx);
1690         if (match_res == PARSE_FAIL) {
1691             return false;
1692         }
1693         if (match_res == PARSE_SUCCESS) {
1694             zone->fields.arr[field_idx] = val;
1695         }
1696         if (field_idx == ZI_ZONE_PRESENT && val == 0) {
1697             /* zone is not populated, stop parsing it */
1698             return true;
1699         }
1700     }
1701     return false;
1702 }
1703 
1704 static int zoneinfo_parse_node(char **buf, struct zoneinfo_node *node) {
1705     int fields_to_match = ZI_NODE_FIELD_COUNT;
1706 
1707     for (char *line = strtok_r(NULL, "\n", buf); line;
1708          line = strtok_r(NULL, "\n", buf)) {
1709         char *cp;
1710         char *ap;
1711         char *save_ptr;
1712         int64_t val;
1713         int field_idx;
1714         enum field_match_result match_res;
1715 
1716         cp = strtok_r(line, " ", &save_ptr);
1717         if (!cp) {
1718             return false;
1719         }
1720 
1721         ap = strtok_r(NULL, " ", &save_ptr);
1722         if (!ap) {
1723             return false;
1724         }
1725 
1726         match_res = match_field(cp, ap, zoneinfo_node_field_names, ZI_NODE_FIELD_COUNT,
1727             &val, &field_idx);
1728         if (match_res == PARSE_FAIL) {
1729             return false;
1730         }
1731         if (match_res == PARSE_SUCCESS) {
1732             node->fields.arr[field_idx] = val;
1733             fields_to_match--;
1734             if (!fields_to_match) {
1735                 return true;
1736             }
1737         }
1738     }
1739     return false;
1740 }
1741 
1742 static int zoneinfo_parse(struct zoneinfo *zi) {
1743     static struct reread_data file_data = {
1744         .filename = ZONEINFO_PATH,
1745         .fd = -1,
1746     };
1747     char *buf;
1748     char *save_ptr;
1749     char *line;
1750     char zone_name[LINE_MAX + 1];
1751     struct zoneinfo_node *node = NULL;
1752     int node_idx = 0;
1753     int zone_idx = 0;
1754 
1755     memset(zi, 0, sizeof(struct zoneinfo));
1756 
1757     if ((buf = reread_file(&file_data)) == NULL) {
1758         return -1;
1759     }
1760 
1761     for (line = strtok_r(buf, "\n", &save_ptr); line;
1762          line = strtok_r(NULL, "\n", &save_ptr)) {
1763         int node_id;
1764         if (sscanf(line, "Node %d, zone %" STRINGIFY(LINE_MAX) "s", &node_id, zone_name) == 2) {
1765             if (!node || node->id != node_id) {
1766                 /* new node is found */
1767                 if (node) {
1768                     node->zone_count = zone_idx + 1;
1769                     node_idx++;
1770                     if (node_idx == MAX_NR_NODES) {
1771                         /* max node count exceeded */
1772                         ALOGE("%s parse error", file_data.filename);
1773                         return -1;
1774                     }
1775                 }
1776                 node = &zi->nodes[node_idx];
1777                 node->id = node_id;
1778                 zone_idx = 0;
1779                 if (!zoneinfo_parse_node(&save_ptr, node)) {
1780                     ALOGE("%s parse error", file_data.filename);
1781                     return -1;
1782                 }
1783             } else {
1784                 /* new zone is found */
1785                 zone_idx++;
1786             }
1787             if (!zoneinfo_parse_zone(&save_ptr, &node->zones[zone_idx])) {
1788                 ALOGE("%s parse error", file_data.filename);
1789                 return -1;
1790             }
1791         }
1792     }
1793     if (!node) {
1794         ALOGE("%s parse error", file_data.filename);
1795         return -1;
1796     }
1797     node->zone_count = zone_idx + 1;
1798     zi->node_count = node_idx + 1;
1799 
1800     /* calculate totals fields */
1801     for (node_idx = 0; node_idx < zi->node_count; node_idx++) {
1802         node = &zi->nodes[node_idx];
1803         for (zone_idx = 0; zone_idx < node->zone_count; zone_idx++) {
1804             struct zoneinfo_zone *zone = &zi->nodes[node_idx].zones[zone_idx];
1805             zi->totalreserve_pages += zone->max_protection + zone->fields.field.high;
1806         }
1807         zi->total_inactive_file += node->fields.field.nr_inactive_file;
1808         zi->total_active_file += node->fields.field.nr_active_file;
1809     }
1810     return 0;
1811 }
1812 
1813 /* /proc/meminfo parsing routines */
1814 static bool meminfo_parse_line(char *line, union meminfo *mi) {
1815     char *cp = line;
1816     char *ap;
1817     char *save_ptr;
1818     int64_t val;
1819     int field_idx;
1820     enum field_match_result match_res;
1821 
1822     cp = strtok_r(line, " ", &save_ptr);
1823     if (!cp) {
1824         return false;
1825     }
1826 
1827     ap = strtok_r(NULL, " ", &save_ptr);
1828     if (!ap) {
1829         return false;
1830     }
1831 
1832     match_res = match_field(cp, ap, meminfo_field_names, MI_FIELD_COUNT,
1833         &val, &field_idx);
1834     if (match_res == PARSE_SUCCESS) {
1835         mi->arr[field_idx] = val / page_k;
1836     }
1837     return (match_res != PARSE_FAIL);
1838 }
1839 
1840 static int64_t read_gpu_total_kb() {
1841     static int fd = android::bpf::bpfFdGet(
1842             "/sys/fs/bpf/map_gpuMem_gpu_mem_total_map", BPF_F_RDONLY);
1843     static constexpr uint64_t kBpfKeyGpuTotalUsage = 0;
1844     uint64_t value;
1845 
1846     if (fd < 0) {
1847         return 0;
1848     }
1849 
1850     return android::bpf::findMapEntry(fd, &kBpfKeyGpuTotalUsage, &value)
1851             ? 0
1852             : (int32_t)(value / 1024);
1853 }
1854 
1855 static int meminfo_parse(union meminfo *mi) {
1856     static struct reread_data file_data = {
1857         .filename = MEMINFO_PATH,
1858         .fd = -1,
1859     };
1860     char *buf;
1861     char *save_ptr;
1862     char *line;
1863 
1864     memset(mi, 0, sizeof(union meminfo));
1865 
1866     if ((buf = reread_file(&file_data)) == NULL) {
1867         return -1;
1868     }
1869 
1870     for (line = strtok_r(buf, "\n", &save_ptr); line;
1871          line = strtok_r(NULL, "\n", &save_ptr)) {
1872         if (!meminfo_parse_line(line, mi)) {
1873             ALOGE("%s parse error", file_data.filename);
1874             return -1;
1875         }
1876     }
1877     mi->field.nr_file_pages = mi->field.cached + mi->field.swap_cached +
1878         mi->field.buffers;
1879     mi->field.total_gpu_kb = read_gpu_total_kb();
1880     mi->field.easy_available = mi->field.nr_free_pages + mi->field.inactive_file;
1881 
1882     return 0;
1883 }
1884 
1885 // In the case of ZRAM, mi->field.free_swap can't be used directly because swap space is taken
1886 // from the free memory or reclaimed. Use the lowest of free_swap and easily available memory to
1887 // measure free swap because they represent how much swap space the system will consider to use
1888 // and how much it can actually use.
1889 static inline int64_t get_free_swap(union meminfo *mi) {
1890     return std::min(mi->field.free_swap, mi->field.easy_available);
1891 }
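/*
 * Worked example (assumed values, in pages): with free_swap = 150000 but
 * nr_free_pages = 20000 and inactive_file = 30000, easy_available is 50000,
 * so get_free_swap() reports 50000: with ZRAM, filling the remaining "free"
 * swap would require memory the system does not actually have.
 */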
1892 
1893 /* /proc/vmstat parsing routines */
1894 static bool vmstat_parse_line(char *line, union vmstat *vs) {
1895     char *cp;
1896     char *ap;
1897     char *save_ptr;
1898     int64_t val;
1899     int field_idx;
1900     enum field_match_result match_res;
1901 
1902     cp = strtok_r(line, " ", &save_ptr);
1903     if (!cp) {
1904         return false;
1905     }
1906 
1907     ap = strtok_r(NULL, " ", &save_ptr);
1908     if (!ap) {
1909         return false;
1910     }
1911 
1912     match_res = match_field(cp, ap, vmstat_field_names, VS_FIELD_COUNT,
1913         &val, &field_idx);
1914     if (match_res == PARSE_SUCCESS) {
1915         vs->arr[field_idx] = val;
1916     }
1917     return (match_res != PARSE_FAIL);
1918 }
1919 
1920 static int vmstat_parse(union vmstat *vs) {
1921     static struct reread_data file_data = {
1922         .filename = VMSTAT_PATH,
1923         .fd = -1,
1924     };
1925     char *buf;
1926     char *save_ptr;
1927     char *line;
1928 
1929     memset(vs, 0, sizeof(union vmstat));
1930 
1931     if ((buf = reread_file(&file_data)) == NULL) {
1932         return -1;
1933     }
1934 
1935     for (line = strtok_r(buf, "\n", &save_ptr); line;
1936          line = strtok_r(NULL, "\n", &save_ptr)) {
1937         if (!vmstat_parse_line(line, vs)) {
1938             ALOGE("%s parse error", file_data.filename);
1939             return -1;
1940         }
1941     }
1942 
1943     return 0;
1944 }
1945 
1946 static int psi_parse(struct reread_data *file_data, struct psi_stats stats[], bool full) {
1947     char *buf;
1948     char *save_ptr;
1949     char *line;
1950 
1951     if ((buf = reread_file(file_data)) == NULL) {
1952         return -1;
1953     }
1954 
1955     line = strtok_r(buf, "\n", &save_ptr);
1956     if (parse_psi_line(line, PSI_SOME, stats)) {
1957         return -1;
1958     }
1959     if (full) {
1960         line = strtok_r(NULL, "\n", &save_ptr);
1961         if (parse_psi_line(line, PSI_FULL, stats)) {
1962             return -1;
1963         }
1964     }
1965 
1966     return 0;
1967 }
1968 
1969 static int psi_parse_mem(struct psi_data *psi_data) {
1970     static struct reread_data file_data = {
1971         .filename = PSI_PATH_MEMORY,
1972         .fd = -1,
1973     };
1974     return psi_parse(&file_data, psi_data->mem_stats, true);
1975 }
1976 
1977 static int psi_parse_io(struct psi_data *psi_data) {
1978     static struct reread_data file_data = {
1979         .filename = PSI_PATH_IO,
1980         .fd = -1,
1981     };
1982     return psi_parse(&file_data, psi_data->io_stats, true);
1983 }
1984 
1985 static int psi_parse_cpu(struct psi_data *psi_data) {
1986     static struct reread_data file_data = {
1987         .filename = PSI_PATH_CPU,
1988         .fd = -1,
1989     };
1990     return psi_parse(&file_data, psi_data->cpu_stats, false);
1991 }
1992 
1993 enum wakeup_reason {
1994     Event,
1995     Polling
1996 };
1997 
1998 struct wakeup_info {
1999     struct timespec wakeup_tm;
2000     struct timespec prev_wakeup_tm;
2001     struct timespec last_event_tm;
2002     int wakeups_since_event;
2003     int skipped_wakeups;
2004 };
2005 
2006 /*
2007  * After the initial memory pressure event is received lmkd schedules periodic wakeups to check
2008  * the memory conditions and kill if needed (polling). This is done because pressure events are
2009  * rate-limited and memory conditions can change in between events. Therefore after the initial
2010  * event there might be multiple wakeups. This function records the wakeup information such as the
2011  * timestamps of the last event and the last wakeup, the number of wakeups since the last event
2012  * and how many of those wakeups were skipped (some wakeups are skipped if previously killed
2013  * process is still freeing its memory).
2014  */
2015 static void record_wakeup_time(struct timespec *tm, enum wakeup_reason reason,
2016                                struct wakeup_info *wi) {
2017     wi->prev_wakeup_tm = wi->wakeup_tm;
2018     wi->wakeup_tm = *tm;
2019     if (reason == Event) {
2020         wi->last_event_tm = *tm;
2021         wi->wakeups_since_event = 0;
2022         wi->skipped_wakeups = 0;
2023     } else {
2024         wi->wakeups_since_event++;
2025     }
2026 }
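/*
 * Illustrative sequence (hypothetical timings): a PSI trigger at t=0 arrives
 * as an Event and resets the counters; polling wakeups at t=200ms, t=400ms,
 * ... are recorded with reason=Polling, incrementing wakeups_since_event
 * until the next real event resets it again.
 */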
2027 
2028 struct kill_info {
2029     enum kill_reasons kill_reason;
2030     const char *kill_desc;
2031     int thrashing;
2032     int max_thrashing;
2033 };
2034 
2035 static void killinfo_log(struct proc* procp, int min_oom_score, int rss_kb,
2036                          int swap_kb, struct kill_info *ki, union meminfo *mi,
2037                          struct wakeup_info *wi, struct timespec *tm, struct psi_data *pd) {
2038     /* log process information */
2039     android_log_write_int32(ctx, procp->pid);
2040     android_log_write_int32(ctx, procp->uid);
2041     android_log_write_int32(ctx, procp->oomadj);
2042     android_log_write_int32(ctx, min_oom_score);
2043     android_log_write_int32(ctx, std::min(rss_kb, (int)INT32_MAX));
2044     android_log_write_int32(ctx, ki ? ki->kill_reason : NONE);
2045 
2046     /* log meminfo fields */
2047     for (int field_idx = 0; field_idx < MI_FIELD_COUNT; field_idx++) {
2048         android_log_write_int32(ctx,
2049                                 mi ? std::min(mi->arr[field_idx] * page_k, (int64_t)INT32_MAX) : 0);
2050     }
2051 
2052     /* log lmkd wakeup information */
2053     if (wi) {
2054         android_log_write_int32(ctx, (int32_t)get_time_diff_ms(&wi->last_event_tm, tm));
2055         android_log_write_int32(ctx, (int32_t)get_time_diff_ms(&wi->prev_wakeup_tm, tm));
2056         android_log_write_int32(ctx, wi->wakeups_since_event);
2057         android_log_write_int32(ctx, wi->skipped_wakeups);
2058     } else {
2059         android_log_write_int32(ctx, 0);
2060         android_log_write_int32(ctx, 0);
2061         android_log_write_int32(ctx, 0);
2062         android_log_write_int32(ctx, 0);
2063     }
2064 
2065     android_log_write_int32(ctx, std::min(swap_kb, (int)INT32_MAX));
2066     android_log_write_int32(ctx, mi ? (int32_t)mi->field.total_gpu_kb : 0);
2067     if (ki) {
2068         android_log_write_int32(ctx, ki->thrashing);
2069         android_log_write_int32(ctx, ki->max_thrashing);
2070     } else {
2071         android_log_write_int32(ctx, 0);
2072         android_log_write_int32(ctx, 0);
2073     }
2074 
2075     if (pd) {
2076         android_log_write_float32(ctx, pd->mem_stats[PSI_SOME].avg10);
2077         android_log_write_float32(ctx, pd->mem_stats[PSI_FULL].avg10);
2078         android_log_write_float32(ctx, pd->io_stats[PSI_SOME].avg10);
2079         android_log_write_float32(ctx, pd->io_stats[PSI_FULL].avg10);
2080         android_log_write_float32(ctx, pd->cpu_stats[PSI_SOME].avg10);
2081     } else {
2082         for (int i = 0; i < 5; i++) {
2083             android_log_write_float32(ctx, 0);
2084         }
2085     }
2086 
2087     android_log_write_list(ctx, LOG_ID_EVENTS);
2088     android_log_reset(ctx);
2089 }
2090 
2091 // Note: returned entry is only an anchor and does not hold a valid process info.
2092 // When called from a non-main thread, adjslot_list_lock read lock should be taken.
2093 static struct proc *proc_adj_head(int oomadj) {
2094     return (struct proc *)&procadjslot_list[ADJTOSLOT(oomadj)];
2095 }
2096 
2097 // When called from a non-main thread, adjslot_list_lock read lock should be taken.
2098 static struct proc *proc_adj_tail(int oomadj) {
2099     return (struct proc *)adjslot_tail(&procadjslot_list[ADJTOSLOT(oomadj)]);
2100 }
2101 
2102 // When called from a non-main thread, adjslot_list_lock read lock should be taken.
2103 static struct proc *proc_adj_prev(int oomadj, int pid) {
2104     struct adjslot_list *head = &procadjslot_list[ADJTOSLOT(oomadj)];
2105     struct adjslot_list *curr = adjslot_tail(&procadjslot_list[ADJTOSLOT(oomadj)]);
2106 
2107     while (curr != head) {
2108         if (((struct proc *)curr)->pid == pid) {
2109             return (struct proc *)curr->prev;
2110         }
2111         curr = curr->prev;
2112     }
2113 
2114     return NULL;
2115 }
2116 
2117 // Can be called only from the main thread.
2118 static struct proc *proc_get_heaviest(int oomadj) {
2119     struct adjslot_list *head = &procadjslot_list[ADJTOSLOT(oomadj)];
2120     struct adjslot_list *curr = head->next;
2121     struct proc *maxprocp = NULL;
2122     int maxsize = 0;
2123     while (curr != head) {
2124         int pid = ((struct proc *)curr)->pid;
2125         int tasksize = proc_get_size(pid);
2126         if (tasksize < 0) {
2127             struct adjslot_list *next = curr->next;
2128             pid_remove(pid);
2129             curr = next;
2130         } else {
2131             if (tasksize > maxsize) {
2132                 maxsize = tasksize;
2133                 maxprocp = (struct proc *)curr;
2134             }
2135             curr = curr->next;
2136         }
2137     }
2138     return maxprocp;
2139 }
2140 
2141 static bool find_victim(int oom_score, int prev_pid, struct proc &target_proc) {
2142     struct proc *procp;
2143     std::shared_lock lock(adjslot_list_lock);
2144 
2145     if (!prev_pid) {
2146         procp = proc_adj_tail(oom_score);
2147     } else {
2148         procp = proc_adj_prev(oom_score, prev_pid);
2149         if (!procp) {
2150             // pid was removed, restart at the tail
2151             procp = proc_adj_tail(oom_score);
2152         }
2153     }
2154 
2155     // the list is empty at this oom_score or we looped through it
2156     if (!procp || procp == proc_adj_head(oom_score)) {
2157         return false;
2158     }
2159 
2160     // make a copy because original might be destroyed after adjslot_list_lock is released
2161     target_proc = *procp;
2162 
2163     return true;
2164 }
2165 
2166 static void watchdog_callback() {
2167     int prev_pid = 0;
2168 
2169     ALOGW("lmkd watchdog timed out!");
2170     for (int oom_score = OOM_SCORE_ADJ_MAX; oom_score >= 0;) {
2171         struct proc target;
2172 
2173         if (!find_victim(oom_score, prev_pid, target)) {
2174             oom_score--;
2175             prev_pid = 0;
2176             continue;
2177         }
2178 
2179         if (target.valid && reaper.kill({ target.pidfd, target.pid, target.uid }, true) == 0) {
2180             ALOGW("lmkd watchdog killed process %d, oom_score_adj %d", target.pid, oom_score);
2181             killinfo_log(&target, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
2182             // Can't call pid_remove() from non-main thread, therefore just invalidate the record
2183             pid_invalidate(target.pid);
2184             break;
2185         }
2186         prev_pid = target.pid;
2187     }
2188 }
2189 
2190 static Watchdog watchdog(WATCHDOG_TIMEOUT_SEC, watchdog_callback);
2191 
2192 static bool is_kill_pending(void) {
2193     char buf[24];
2194 
2195     if (last_kill_pid_or_fd < 0) {
2196         return false;
2197     }
2198 
2199     if (pidfd_supported) {
2200         return true;
2201     }
2202 
2203     /* when pidfd is not supported base the decision on /proc/<pid> existence */
2204     snprintf(buf, sizeof(buf), "/proc/%d/", last_kill_pid_or_fd);
2205     if (access(buf, F_OK) == 0) {
2206         return true;
2207     }
2208 
2209     return false;
2210 }
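/*
 * Example (hypothetical pid): after killing pid 1234 on a kernel without
 * pidfd support, the kill stays "pending" while /proc/1234/ still exists,
 * i.e. until the process has been fully reaped.
 */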
2211 
2212 static bool is_waiting_for_kill(void) {
2213     return pidfd_supported && last_kill_pid_or_fd >= 0;
2214 }
2215 
2216 static void stop_wait_for_proc_kill(bool finished) {
2217     struct epoll_event epev;
2218 
2219     if (last_kill_pid_or_fd < 0) {
2220         return;
2221     }
2222 
2223     if (debug_process_killing) {
2224         struct timespec curr_tm;
2225 
2226         if (clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm) != 0) {
2227             /*
2228              * curr_tm is used here merely to report kill duration, so this failure is not fatal.
2229              * Log an error and continue.
2230              */
2231             ALOGE("Failed to get current time");
2232         }
2233 
2234         if (finished) {
2235             ALOGI("Process got killed in %ldms",
2236                 get_time_diff_ms(&last_kill_tm, &curr_tm));
2237         } else {
2238             ALOGI("Stop waiting for process kill after %ldms",
2239                 get_time_diff_ms(&last_kill_tm, &curr_tm));
2240         }
2241     }
2242 
2243     if (pidfd_supported) {
2244         /* unregister fd */
2245         if (epoll_ctl(epollfd, EPOLL_CTL_DEL, last_kill_pid_or_fd, &epev)) {
2246             // Log an error and keep going
2247             ALOGE("epoll_ctl for last killed process failed; errno=%d", errno);
2248         }
2249         maxevents--;
2250         close(last_kill_pid_or_fd);
2251     }
2252 
2253     last_kill_pid_or_fd = -1;
2254 }
2255 
2256 static void kill_done_handler(int data __unused, uint32_t events __unused,
2257                               struct polling_params *poll_params) {
2258     stop_wait_for_proc_kill(true);
2259     poll_params->update = POLLING_RESUME;
2260 }
2261 
2262 static void kill_fail_handler(int data __unused, uint32_t events __unused,
2263                               struct polling_params *poll_params) {
2264     int pid;
2265 
2266     // Extract pid from the communication pipe. Clearing the pipe this way allows further
2267     // epoll_wait calls to sleep until the next event.
2268     if (TEMP_FAILURE_RETRY(read(reaper_comm_fd[0], &pid, sizeof(pid))) != sizeof(pid)) {
2269         ALOGE("thread communication read failed: %s", strerror(errno));
2270     }
2271     stop_wait_for_proc_kill(false);
2272     poll_params->update = POLLING_RESUME;
2273 }
2274 
2275 static void start_wait_for_proc_kill(int pid_or_fd) {
2276     static struct event_handler_info kill_done_hinfo = { 0, kill_done_handler };
2277     struct epoll_event epev;
2278 
2279     if (last_kill_pid_or_fd >= 0) {
2280         /* Should not happen but if it does we should stop previous wait */
2281         ALOGE("Attempt to wait for a kill while another wait is in progress");
2282         stop_wait_for_proc_kill(false);
2283     }
2284 
2285     last_kill_pid_or_fd = pid_or_fd;
2286 
2287     if (!pidfd_supported) {
2288         /* If pidfd is not supported just store PID and exit */
2289         return;
2290     }
2291 
2292     epev.events = EPOLLIN;
2293     epev.data.ptr = (void *)&kill_done_hinfo;
2294     if (epoll_ctl(epollfd, EPOLL_CTL_ADD, last_kill_pid_or_fd, &epev) != 0) {
2295         ALOGE("epoll_ctl for last kill failed; errno=%d", errno);
2296         close(last_kill_pid_or_fd);
2297         last_kill_pid_or_fd = -1;
2298         return;
2299     }
2300     maxevents++;
2301 }
2302 
2303 /* Kill one process specified by procp.  Returns the size (in pages) of the process killed */
2304 static int kill_one_process(struct proc* procp, int min_oom_score, struct kill_info *ki,
2305                             union meminfo *mi, struct wakeup_info *wi, struct timespec *tm,
2306                             struct psi_data *pd) {
2307     int pid = procp->pid;
2308     int pidfd = procp->pidfd;
2309     uid_t uid = procp->uid;
2310     char *taskname;
2311     int kill_result;
2312     int result = -1;
2313     struct memory_stat *mem_st;
2314     struct kill_stat kill_st;
2315     int64_t tgid;
2316     int64_t rss_kb;
2317     int64_t swap_kb;
2318     char buf[PAGE_SIZE];
2319     char desc[LINE_MAX];
2320 
2321     if (!procp->valid || !read_proc_status(pid, buf, sizeof(buf))) {
2322         goto out;
2323     }
2324     if (!parse_status_tag(buf, PROC_STATUS_TGID_FIELD, &tgid)) {
2325         ALOGE("Unable to parse tgid from /proc/%d/status", pid);
2326         goto out;
2327     }
2328     if (tgid != pid) {
2329         ALOGE("Possible pid reuse detected (pid %d, tgid %" PRId64 ")!", pid, tgid);
2330         goto out;
2331     }
2332     // Zombie processes will not have RSS / Swap fields.
2333     if (!parse_status_tag(buf, PROC_STATUS_RSS_FIELD, &rss_kb)) {
2334         goto out;
2335     }
2336     if (!parse_status_tag(buf, PROC_STATUS_SWAP_FIELD, &swap_kb)) {
2337         goto out;
2338     }
2339 
2340     taskname = proc_get_name(pid, buf, sizeof(buf));
2341     // taskname will point inside buf, do not reuse buf onwards.
2342     if (!taskname) {
2343         goto out;
2344     }
2345 
2346     mem_st = stats_read_memory_stat(per_app_memcg, pid, uid, rss_kb * 1024, swap_kb * 1024);
2347 
2348     snprintf(desc, sizeof(desc), "lmk,%d,%d,%d,%d,%d", pid, ki ? (int)ki->kill_reason : -1,
2349              procp->oomadj, min_oom_score, ki ? ki->max_thrashing : -1);
2350 
2351     result = lmkd_free_memory_before_kill_hook(procp, rss_kb / page_k, procp->oomadj,
2352                                                ki ? (int)ki->kill_reason : -1);
2353     if (result > 0) {
2354         /*
2355          * Memory was freed elsewhere; no need to kill. Note: intentionally do not
2356          * pid_remove(pid) since it was not killed.
2357          */
2358         ALOGI("Skipping kill; %ld kB freed elsewhere.", result * page_k);
2359         return result;
2360     }
2361 
2362     trace_kill_start(desc);
2363 
2364     start_wait_for_proc_kill(pidfd < 0 ? pid : pidfd);
2365     kill_result = reaper.kill({ pidfd, pid, uid }, false);
2366 
2367     trace_kill_end();
2368 
2369     if (kill_result) {
2370         stop_wait_for_proc_kill(false);
2371         ALOGE("kill(%d): errno=%d", pid, errno);
2372         /* Delete process record even when we fail to kill so that we don't get stuck on it */
2373         goto out;
2374     }
2375 
2376     last_kill_tm = *tm;
2377 
2378     inc_killcnt(procp->oomadj);
2379 
2380     if (ki) {
2381         kill_st.kill_reason = ki->kill_reason;
2382         kill_st.thrashing = ki->thrashing;
2383         kill_st.max_thrashing = ki->max_thrashing;
2384         ALOGI("Kill '%s' (%d), uid %d, oom_score_adj %d to free %" PRId64 "kB rss, %" PRId64
2385               "kB swap; reason: %s", taskname, pid, uid, procp->oomadj, rss_kb, swap_kb,
2386               ki->kill_desc);
2387     } else {
2388         kill_st.kill_reason = NONE;
2389         kill_st.thrashing = 0;
2390         kill_st.max_thrashing = 0;
2391         ALOGI("Kill '%s' (%d), uid %d, oom_score_adj %d to free %" PRId64 "kB rss, %" PRId64
2392               "kB swap", taskname, pid, uid, procp->oomadj, rss_kb, swap_kb);
2393     }
2394     killinfo_log(procp, min_oom_score, rss_kb, swap_kb, ki, mi, wi, tm, pd);
2395 
2396     kill_st.uid = static_cast<int32_t>(uid);
2397     kill_st.taskname = taskname;
2398     kill_st.oom_score = procp->oomadj;
2399     kill_st.min_oom_score = min_oom_score;
2400     kill_st.free_mem_kb = mi->field.nr_free_pages * page_k;
2401     kill_st.free_swap_kb = get_free_swap(mi) * page_k;
2402     stats_write_lmk_kill_occurred(&kill_st, mem_st);
2403 
2404     ctrl_data_write_lmk_kill_occurred((pid_t)pid, uid);
2405 
2406     result = rss_kb / page_k;
2407 
2408 out:
2409     /*
2410      * WARNING: After pid_remove() procp is freed and can't be used!
2411      * Therefore placed at the end of the function.
2412      */
2413     pid_remove(pid);
2414     return result;
2415 }
2416 
2417 /*
2418  * Find one process to kill at or above the given oom_score_adj level.
2419  * Returns size of the killed process.
2420  */
2421 static int find_and_kill_process(int min_score_adj, struct kill_info *ki, union meminfo *mi,
2422                                  struct wakeup_info *wi, struct timespec *tm,
2423                                  struct psi_data *pd) {
2424     int i;
2425     int killed_size = 0;
2426     bool lmk_state_change_start = false;
2427     bool choose_heaviest_task = kill_heaviest_task;
2428 
2429     for (i = OOM_SCORE_ADJ_MAX; i >= min_score_adj; i--) {
2430         struct proc *procp;
2431 
2432         if (!choose_heaviest_task && i <= PERCEPTIBLE_APP_ADJ) {
2433             /*
2434              * If we have to choose a perceptible process, choose the heaviest one to
2435              * hopefully minimize the number of victims.
2436              */
2437             choose_heaviest_task = true;
2438         }
2439 
2440         while (true) {
2441             procp = choose_heaviest_task ?
2442                 proc_get_heaviest(i) : proc_adj_tail(i);
2443 
2444             if (!procp)
2445                 break;
2446 
2447             killed_size = kill_one_process(procp, min_score_adj, ki, mi, wi, tm, pd);
2448             if (killed_size >= 0) {
2449                 if (!lmk_state_change_start) {
2450                     lmk_state_change_start = true;
2451                     stats_write_lmk_state_changed(STATE_START);
2452                 }
2453                 break;
2454             }
2455         }
2456         if (killed_size) {
2457             break;
2458         }
2459     }
2460 
2461     if (lmk_state_change_start) {
2462         stats_write_lmk_state_changed(STATE_STOP);
2463     }
2464 
2465     return killed_size;
2466 }
2467 
2468 static int64_t get_memory_usage(struct reread_data *file_data) {
2469     int64_t mem_usage;
2470     char *buf;
2471 
2472     if ((buf = reread_file(file_data)) == NULL) {
2473         return -1;
2474     }
2475 
2476     if (!parse_int64(buf, &mem_usage)) {
2477         ALOGE("%s parse error", file_data->filename);
2478         return -1;
2479     }
2480     if (mem_usage == 0) {
2481         ALOGE("No memory!");
2482         return -1;
2483     }
2484     return mem_usage;
2485 }
2486 
2487 void record_low_pressure_levels(union meminfo *mi) {
2488     if (low_pressure_mem.min_nr_free_pages == -1 ||
2489         low_pressure_mem.min_nr_free_pages > mi->field.nr_free_pages) {
2490         if (debug_process_killing) {
2491             ALOGI("Low pressure min memory update from %" PRId64 " to %" PRId64,
2492                 low_pressure_mem.min_nr_free_pages, mi->field.nr_free_pages);
2493         }
2494         low_pressure_mem.min_nr_free_pages = mi->field.nr_free_pages;
2495     }
2496     /*
2497      * Free memory measured at low vmpressure events occasionally spikes,
2498      * possibly due to a stale low vmpressure event arriving after the memory
2499      * was already freed up (no pressure should have been reported then).
2500      * Ignore large jumps in max_nr_free_pages that would mess up our stats.
2501      */
2502     if (low_pressure_mem.max_nr_free_pages == -1 ||
2503         (low_pressure_mem.max_nr_free_pages < mi->field.nr_free_pages &&
2504          mi->field.nr_free_pages - low_pressure_mem.max_nr_free_pages <
2505          low_pressure_mem.max_nr_free_pages * 0.1)) {
2506         if (debug_process_killing) {
2507             ALOGI("Low pressure max memory update from %" PRId64 " to %" PRId64,
2508                 low_pressure_mem.max_nr_free_pages, mi->field.nr_free_pages);
2509         }
2510         low_pressure_mem.max_nr_free_pages = mi->field.nr_free_pages;
2511     }
2512 }
2513 
2514 enum vmpressure_level upgrade_level(enum vmpressure_level level) {
2515     return (enum vmpressure_level)((level < VMPRESS_LEVEL_CRITICAL) ?
2516         level + 1 : level);
2517 }
2518 
2519 enum vmpressure_level downgrade_level(enum vmpressure_level level) {
2520     return (enum vmpressure_level)((level > VMPRESS_LEVEL_LOW) ?
2521         level - 1 : level);
2522 }
2523 
2524 enum zone_watermark {
2525     WMARK_MIN = 0,
2526     WMARK_LOW,
2527     WMARK_HIGH,
2528     WMARK_NONE
2529 };
2530 
2531 struct zone_watermarks {
2532     long high_wmark;
2533     long low_wmark;
2534     long min_wmark;
2535 };
2536 
2537 /*
2538  * Returns lowest breached watermark or WMARK_NONE.
2539  */
2540 static enum zone_watermark get_lowest_watermark(union meminfo *mi,
2541                                                 struct zone_watermarks *watermarks)
2542 {
2543     int64_t nr_free_pages = mi->field.nr_free_pages - mi->field.cma_free;
2544 
2545     if (nr_free_pages < watermarks->min_wmark) {
2546         return WMARK_MIN;
2547     }
2548     if (nr_free_pages < watermarks->low_wmark) {
2549         return WMARK_LOW;
2550     }
2551     if (nr_free_pages < watermarks->high_wmark) {
2552         return WMARK_HIGH;
2553     }
2554     return WMARK_NONE;
2555 }
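/*
 * Worked example (assumed values, in pages): with min/low/high watermark
 * sums of 5000/7500/10000 and nr_free_pages = 9000 of which cma_free = 2000,
 * the comparison uses 7000 free pages, so WMARK_LOW is reported as breached
 * even though raw free memory (9000) exceeds the low watermark.
 */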
2556 
2557 void calc_zone_watermarks(struct zoneinfo *zi, struct zone_watermarks *watermarks) {
2558     memset(watermarks, 0, sizeof(struct zone_watermarks));
2559 
2560     for (int node_idx = 0; node_idx < zi->node_count; node_idx++) {
2561         struct zoneinfo_node *node = &zi->nodes[node_idx];
2562         for (int zone_idx = 0; zone_idx < node->zone_count; zone_idx++) {
2563             struct zoneinfo_zone *zone = &node->zones[zone_idx];
2564 
2565             if (!zone->fields.field.present) {
2566                 continue;
2567             }
2568 
2569             watermarks->high_wmark += zone->max_protection + zone->fields.field.high;
2570             watermarks->low_wmark += zone->max_protection + zone->fields.field.low;
2571             watermarks->min_wmark += zone->max_protection + zone->fields.field.min;
2572         }
2573     }
2574 }
2575 
2576 static int calc_swap_utilization(union meminfo *mi) {
2577     int64_t swap_used = mi->field.total_swap - get_free_swap(mi);
2578     int64_t total_swappable = mi->field.active_anon + mi->field.inactive_anon +
2579                               mi->field.shmem + swap_used;
2580     return total_swappable > 0 ? (swap_used * 100) / total_swappable : 0;
2581 }
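/*
 * Worked example (assumed values, in pages): total_swap = 200000 and
 * get_free_swap() = 80000 give swap_used = 120000; with active_anon +
 * inactive_anon + shmem = 180000, total_swappable = 300000, so utilization
 * is 120000 * 100 / 300000 = 40%.
 */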
2582 
2583 static void mp_event_psi(int data, uint32_t events, struct polling_params *poll_params) {
2584     enum reclaim_state {
2585         NO_RECLAIM = 0,
2586         KSWAPD_RECLAIM,
2587         DIRECT_RECLAIM,
2588     };
2589     static int64_t init_ws_refault;
2590     static int64_t prev_workingset_refault;
2591     static int64_t base_file_lru;
2592     static int64_t init_pgscan_kswapd;
2593     static int64_t init_pgscan_direct;
2594     static bool killing;
2595     static int thrashing_limit = thrashing_limit_pct;
2596     static struct zone_watermarks watermarks;
2597     static struct timespec wmark_update_tm;
2598     static struct wakeup_info wi;
2599     static struct timespec thrashing_reset_tm;
2600     static int64_t prev_thrash_growth = 0;
2601     static bool check_filecache = false;
2602     static int max_thrashing = 0;
2603 
2604     union meminfo mi;
2605     union vmstat vs;
2606     struct psi_data psi_data;
2607     struct timespec curr_tm;
2608     int64_t thrashing = 0;
2609     bool swap_is_low = false;
2610     enum vmpressure_level level = (enum vmpressure_level)data;
2611     enum kill_reasons kill_reason = NONE;
2612     bool cycle_after_kill = false;
2613     enum reclaim_state reclaim = NO_RECLAIM;
2614     enum zone_watermark wmark = WMARK_NONE;
2615     char kill_desc[LINE_MAX];
2616     bool cut_thrashing_limit = false;
2617     int min_score_adj = 0;
2618     int swap_util = 0;
2619     int64_t swap_low_threshold;
2620     long since_thrashing_reset_ms;
2621     int64_t workingset_refault_file;
2622     bool critical_stall = false;
2623 
2624     if (clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm) != 0) {
2625         ALOGE("Failed to get current time");
2626         return;
2627     }
2628 
2629     record_wakeup_time(&curr_tm, events ? Event : Polling, &wi);
2630 
2631     bool kill_pending = is_kill_pending();
2632     if (kill_pending && (kill_timeout_ms == 0 ||
2633         get_time_diff_ms(&last_kill_tm, &curr_tm) < static_cast<long>(kill_timeout_ms))) {
2634         /* Skip while still killing a process */
2635         wi.skipped_wakeups++;
2636         goto no_kill;
2637     }
2638     /*
2639      * Process is dead or kill timeout is over, stop waiting. This has no effect if pidfds are
2640      * supported and death notification already caused waiting to stop.
2641      */
2642     stop_wait_for_proc_kill(!kill_pending);
2643 
2644     if (vmstat_parse(&vs) < 0) {
2645         ALOGE("Failed to parse vmstat!");
2646         return;
2647     }
2648     /* Starting 5.9 kernel workingset_refault vmstat field was renamed workingset_refault_file */
2649     workingset_refault_file = vs.field.workingset_refault ? : vs.field.workingset_refault_file;
2650 
2651     if (meminfo_parse(&mi) < 0) {
2652         ALOGE("Failed to parse meminfo!");
2653         return;
2654     }
2655 
2656     /* Reset states after process got killed */
2657     if (killing) {
2658         killing = false;
2659         cycle_after_kill = true;
2660         /* Reset file-backed pagecache size and refault amounts after a kill */
2661         base_file_lru = vs.field.nr_inactive_file + vs.field.nr_active_file;
2662         init_ws_refault = workingset_refault_file;
2663         thrashing_reset_tm = curr_tm;
2664         prev_thrash_growth = 0;
2665     }
2666 
2667     /* Check free swap levels */
2668     if (swap_free_low_percentage) {
2669         swap_low_threshold = mi.field.total_swap * swap_free_low_percentage / 100;
2670         swap_is_low = get_free_swap(&mi) < swap_low_threshold;
2671     } else {
2672         swap_low_threshold = 0;
2673     }
2674 
2675     /* Identify reclaim state */
2676     if (vs.field.pgscan_direct != init_pgscan_direct) {
2677         init_pgscan_direct = vs.field.pgscan_direct;
2678         init_pgscan_kswapd = vs.field.pgscan_kswapd;
2679         reclaim = DIRECT_RECLAIM;
2680     } else if (vs.field.pgscan_kswapd != init_pgscan_kswapd) {
2681         init_pgscan_kswapd = vs.field.pgscan_kswapd;
2682         reclaim = KSWAPD_RECLAIM;
2683     } else if (workingset_refault_file == prev_workingset_refault) {
2684         /*
2685          * Device is not thrashing and not reclaiming, bail out early until we see these stats
2686          * changing
2687          */
2688         goto no_kill;
2689     }
2690 
2691     prev_workingset_refault = workingset_refault_file;
2692 
2693     /*
2694      * It's possible we fail to find an eligible process to kill (e.g. no process is
2695      * above oom_adj_min). When this happens, we should retry the kill whenever a new
2696      * eligible process becomes available. This is especially important for the slow
2697      * growing refault case. While retrying, we should keep monitoring the thrashing
2698      * counter, as someone could release memory and mitigate the thrashing. Thus, when
2699      * the thrashing reset window comes, we decay the previous thrashing counter by the
2700      * number of windows passed. If the counter is still greater than the thrashing
2701      * limit, we preserve it so we will retry the kill; otherwise we reset it and stop
2702      * retrying.
2703      */
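    /*
     * Decay example (assumed numbers): with prev_thrash_growth = 80 (%) and
     * windows_passed = 2, the shift below leaves 80 >> 2 = 20 (%), which is
     * then carried into the next window's thrashing estimate.
     */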
2704     since_thrashing_reset_ms = get_time_diff_ms(&thrashing_reset_tm, &curr_tm);
2705     if (since_thrashing_reset_ms > THRASHING_RESET_INTERVAL_MS) {
2706         long windows_passed;
2707         /* Calculate prev_thrash_growth if we crossed THRASHING_RESET_INTERVAL_MS */
2708         prev_thrash_growth = (workingset_refault_file - init_ws_refault) * 100
2709                             / (base_file_lru + 1);
2710         windows_passed = (since_thrashing_reset_ms / THRASHING_RESET_INTERVAL_MS);
2711         /*
2712          * Decay prev_thrashing unless over-the-limit thrashing was registered in the window we
2713          * just crossed, which means there were no eligible processes to kill. We preserve the
2714          * counter in that case to ensure a kill if a new eligible process appears.
2715          */
2716         if (windows_passed > 1 || prev_thrash_growth < thrashing_limit) {
2717             prev_thrash_growth >>= windows_passed;
2718         }
2719 
2720         /* Record file-backed pagecache size when crossing THRASHING_RESET_INTERVAL_MS */
2721         base_file_lru = vs.field.nr_inactive_file + vs.field.nr_active_file;
2722         init_ws_refault = workingset_refault_file;
2723         thrashing_reset_tm = curr_tm;
2724         thrashing_limit = thrashing_limit_pct;
2725     } else {
2726         /* Calculate what % of the file-backed pagecache refaulted so far */
2727         thrashing = (workingset_refault_file - init_ws_refault) * 100 / (base_file_lru + 1);
2728     }
2729     /* Add previous cycle's decayed thrashing amount */
2730     thrashing += prev_thrash_growth;
2731     if (max_thrashing < thrashing) {
2732         max_thrashing = thrashing;
2733     }
2734 
2735     /*
2736      * Refresh watermarks once per min in case user updated one of the margins.
2737      * TODO: b/140521024 replace this periodic update with an API for AMS to notify LMKD
2738      * that zone watermarks were changed by the system software.
2739      */
2740     if (watermarks.high_wmark == 0 || get_time_diff_ms(&wmark_update_tm, &curr_tm) > 60000) {
2741         struct zoneinfo zi;
2742 
2743         if (zoneinfo_parse(&zi) < 0) {
2744             ALOGE("Failed to parse zoneinfo!");
2745             return;
2746         }
2747 
2748         calc_zone_watermarks(&zi, &watermarks);
2749         wmark_update_tm = curr_tm;
2750     }
2751 
2752     /* Find out which watermark is breached if any */
2753     wmark = get_lowest_watermark(&mi, &watermarks);
2754 
2755     if (!psi_parse_mem(&psi_data)) {
2756         critical_stall = psi_data.mem_stats[PSI_FULL].avg10 > (float)stall_limit_critical;
2757     }
2758     /*
2759      * TODO: move this logic into a separate function
2760      * Decide if killing a process is necessary and record the reason
2761      */
2762     if (cycle_after_kill && wmark < WMARK_LOW) {
2763         /*
2764          * Prevent kills not freeing enough memory which might lead to OOM kill.
2765          * This might happen when a process is consuming memory faster than reclaim can
2766          * free even after a kill. Mostly happens when running memory stress tests.
2767          */
2768         kill_reason = PRESSURE_AFTER_KILL;
2769         strncpy(kill_desc, "min watermark is breached even after kill", sizeof(kill_desc));
2770     } else if (level == VMPRESS_LEVEL_CRITICAL && events != 0) {
2771         /*
2772          * Device is too busy reclaiming memory which might lead to ANR.
2773          * Critical level is triggered when PSI complete stall (all tasks are blocked because
2774          * of the memory congestion) breaches the configured threshold.
2775          */
2776         kill_reason = NOT_RESPONDING;
2777         strncpy(kill_desc, "device is not responding", sizeof(kill_desc));
2778     } else if (swap_is_low && thrashing > thrashing_limit_pct) {
2779         /* Page cache is thrashing while swap is low */
2780         kill_reason = LOW_SWAP_AND_THRASHING;
2781         snprintf(kill_desc, sizeof(kill_desc), "device is low on swap (%" PRId64
2782             "kB < %" PRId64 "kB) and thrashing (%" PRId64 "%%)",
2783             get_free_swap(&mi) * page_k, swap_low_threshold * page_k, thrashing);
2784         /* Do not kill perceptible apps unless below min watermark or heavily thrashing */
2785         if (wmark > WMARK_MIN && thrashing < thrashing_critical_pct) {
2786             min_score_adj = PERCEPTIBLE_APP_ADJ + 1;
2787         }
2788         check_filecache = true;
2789     } else if (swap_is_low && wmark < WMARK_HIGH) {
2790         /* Both free memory and swap are low */
2791         kill_reason = LOW_MEM_AND_SWAP;
2792         snprintf(kill_desc, sizeof(kill_desc), "%s watermark is breached and swap is low (%"
2793             PRId64 "kB < %" PRId64 "kB)", wmark < WMARK_LOW ? "min" : "low",
2794             get_free_swap(&mi) * page_k, swap_low_threshold * page_k);
2795         /* Do not kill perceptible apps unless below min watermark or heavily thrashing */
2796         if (wmark > WMARK_MIN && thrashing < thrashing_critical_pct) {
2797             min_score_adj = PERCEPTIBLE_APP_ADJ + 1;
2798         }
2799     } else if (wmark < WMARK_HIGH && swap_util_max < 100 &&
2800                (swap_util = calc_swap_utilization(&mi)) > swap_util_max) {
2801         /*
2802          * Too much anon memory is swapped out but swap is not low.
2803          * Non-swappable allocations created memory pressure.
2804          */
2805         kill_reason = LOW_MEM_AND_SWAP_UTIL;
2806         snprintf(kill_desc, sizeof(kill_desc), "%s watermark is breached and swap utilization"
2807             " is high (%d%% > %d%%)", wmark < WMARK_LOW ? "min" : "low",
2808             swap_util, swap_util_max);
2809     } else if (wmark < WMARK_HIGH && thrashing > thrashing_limit) {
2810         /* Page cache is thrashing while memory is low */
2811         kill_reason = LOW_MEM_AND_THRASHING;
2812         snprintf(kill_desc, sizeof(kill_desc), "%s watermark is breached and thrashing (%"
2813             PRId64 "%%)", wmark < WMARK_LOW ? "min" : "low", thrashing);
2814         cut_thrashing_limit = true;
2815         /* Do not kill perceptible apps unless thrashing at critical levels */
2816         if (thrashing < thrashing_critical_pct) {
2817             min_score_adj = PERCEPTIBLE_APP_ADJ + 1;
2818         }
2819         check_filecache = true;
2820     } else if (reclaim == DIRECT_RECLAIM && thrashing > thrashing_limit) {
2821         /* Page cache is thrashing while in direct reclaim (mostly happens on lowram devices) */
2822         kill_reason = DIRECT_RECL_AND_THRASHING;
2823         snprintf(kill_desc, sizeof(kill_desc), "device is in direct reclaim and thrashing (%"
2824             PRId64 "%%)", thrashing);
2825         cut_thrashing_limit = true;
2826         /* Do not kill perceptible apps unless thrashing at critical levels */
2827         if (thrashing < thrashing_critical_pct) {
2828             min_score_adj = PERCEPTIBLE_APP_ADJ + 1;
2829         }
2830         check_filecache = true;
2831     } else if (check_filecache) {
2832         int64_t file_lru_kb = (vs.field.nr_inactive_file + vs.field.nr_active_file) * page_k;
2833 
2834         if (file_lru_kb < filecache_min_kb) {
2835             /* File cache is too low after thrashing, keep killing background processes */
2836             kill_reason = LOW_FILECACHE_AFTER_THRASHING;
2837             snprintf(kill_desc, sizeof(kill_desc),
2838                 "filecache is low (%" PRId64 "kB < %" PRId64 "kB) after thrashing",
2839                 file_lru_kb, filecache_min_kb);
2840             min_score_adj = PERCEPTIBLE_APP_ADJ + 1;
2841         } else {
2842             /* File cache is big enough, stop checking */
2843             check_filecache = false;
2844         }
2845     }
2846 
2847     /* Kill a process if necessary */
2848     if (kill_reason != NONE) {
2849         struct kill_info ki = {
2850             .kill_reason = kill_reason,
2851             .kill_desc = kill_desc,
2852             .thrashing = (int)thrashing,
2853             .max_thrashing = max_thrashing,
2854         };
2855 
2856         /* Allow killing perceptible apps if the system is stalled */
2857         if (critical_stall) {
2858             min_score_adj = 0;
2859         }
2860         psi_parse_io(&psi_data);
2861         psi_parse_cpu(&psi_data);
2862         int pages_freed = find_and_kill_process(min_score_adj, &ki, &mi, &wi, &curr_tm, &psi_data);
2863         if (pages_freed > 0) {
2864             killing = true;
2865             max_thrashing = 0;
2866             if (cut_thrashing_limit) {
2867                 /*
2868                  * Cut thrashing limit by thrashing_limit_decay_pct percentage of the current
2869                  * thrashing limit until the system stops thrashing.
2870                  */
2871                 thrashing_limit = (thrashing_limit * (100 - thrashing_limit_decay_pct)) / 100;
2872             }
2873         }
2874     }
2875 
2876 no_kill:
2877     /* Do not poll if kernel supports pidfd waiting */
2878     if (is_waiting_for_kill()) {
2879         /* Pause polling if we are waiting for process death notification */
2880         poll_params->update = POLLING_PAUSE;
2881         return;
2882     }
2883 
2884     /*
2885      * Start polling after initial PSI event;
2886      * extend polling while device is in direct reclaim or process is being killed;
2887      * do not extend when kswapd reclaims because that might go on for a long time
2888      * without causing memory pressure
2889      */
2890     if (events || killing || reclaim == DIRECT_RECLAIM) {
2891         poll_params->update = POLLING_START;
2892     }
2893 
2894     /* Decide the polling interval */
2895     if (swap_is_low || killing) {
2896         /* Fast polling during and after a kill or when swap is low */
2897         poll_params->polling_interval_ms = PSI_POLL_PERIOD_SHORT_MS;
2898     } else {
2899         /* By default use long intervals */
2900         poll_params->polling_interval_ms = PSI_POLL_PERIOD_LONG_MS;
2901     }
2902 }
2903 
2904 static std::string GetCgroupAttributePath(const char* attr) {
2905     std::string path;
2906     if (!CgroupGetAttributePath(attr, &path)) {
2907         ALOGE("Unknown cgroup attribute %s", attr);
2908     }
2909     return path;
2910 }
2911 
2912 // The implementation of this function relies on memcg statistics that are only available in the
2913 // v1 cgroup hierarchy.
2914 static void mp_event_common(int data, uint32_t events, struct polling_params *poll_params) {
2915     unsigned long long evcount;
2916     int64_t mem_usage, memsw_usage;
2917     int64_t mem_pressure;
2918     union meminfo mi;
2919     struct zoneinfo zi;
2920     struct timespec curr_tm;
2921     static unsigned long kill_skip_count = 0;
2922     enum vmpressure_level level = (enum vmpressure_level)data;
2923     long other_free = 0, other_file = 0;
2924     int min_score_adj;
2925     int minfree = 0;
2926     static const std::string mem_usage_path = GetCgroupAttributePath("MemUsage");
2927     static struct reread_data mem_usage_file_data = {
2928         .filename = mem_usage_path.c_str(),
2929         .fd = -1,
2930     };
2931     static const std::string memsw_usage_path = GetCgroupAttributePath("MemAndSwapUsage");
2932     static struct reread_data memsw_usage_file_data = {
2933         .filename = memsw_usage_path.c_str(),
2934         .fd = -1,
2935     };
2936     static struct wakeup_info wi;
2937 
2938     if (debug_process_killing) {
2939         ALOGI("%s memory pressure event is triggered", level_name[level]);
2940     }
2941 
2942     if (!use_psi_monitors) {
2943         /*
2944          * Check all event counters from low to critical
2945          * and upgrade to the highest priority one. By reading
2946          * eventfd we also reset the event counters.
2947          */
2948         for (int lvl = VMPRESS_LEVEL_LOW; lvl < VMPRESS_LEVEL_COUNT; lvl++) {
2949             if (mpevfd[lvl] != -1 &&
2950                 TEMP_FAILURE_RETRY(read(mpevfd[lvl],
2951                                    &evcount, sizeof(evcount))) > 0 &&
2952                 evcount > 0 && lvl > level) {
2953                 level = static_cast<vmpressure_level>(lvl);
2954             }
2955         }
2956     }
2957 
2958     /* Start polling after initial PSI event */
2959     if (use_psi_monitors && events) {
2960         /* Override polling params only if current event is more critical */
2961         if (!poll_params->poll_handler || data > poll_params->poll_handler->data) {
2962             poll_params->polling_interval_ms = PSI_POLL_PERIOD_SHORT_MS;
2963             poll_params->update = POLLING_START;
2964         }
2965     }
2966 
2967     if (clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm) != 0) {
2968         ALOGE("Failed to get current time");
2969         return;
2970     }
2971 
2972     record_wakeup_time(&curr_tm, events ? Event : Polling, &wi);
2973 
2974     if (kill_timeout_ms &&
2975         get_time_diff_ms(&last_kill_tm, &curr_tm) < static_cast<long>(kill_timeout_ms)) {
2976         /*
2977          * If we're within the no-kill timeout, see if there's pending reclaim work
2978          * from the last killed process. If so, skip killing for now.
2979          */
2980         if (is_kill_pending()) {
2981             kill_skip_count++;
2982             wi.skipped_wakeups++;
2983             return;
2984         }
2985         /*
2986          * Process is dead, stop waiting. This has no effect if pidfds are supported and
2987          * death notification already caused waiting to stop.
2988          */
2989         stop_wait_for_proc_kill(true);
2990     } else {
2991         /*
2992          * Killing took longer than no-kill timeout. Stop waiting for the last process
2993          * to die because we are ready to kill again.
2994          */
2995         stop_wait_for_proc_kill(false);
2996     }
2997 
2998     if (kill_skip_count > 0) {
2999         ALOGI("%lu memory pressure events were skipped after a kill!",
3000               kill_skip_count);
3001         kill_skip_count = 0;
3002     }
3003 
3004     if (meminfo_parse(&mi) < 0 || zoneinfo_parse(&zi) < 0) {
3005         ALOGE("Failed to get free memory!");
3006         return;
3007     }
3008 
3009     if (use_minfree_levels) {
3010         int i;
3011 
3012         other_free = mi.field.nr_free_pages - zi.totalreserve_pages;
3013         if (mi.field.nr_file_pages > (mi.field.shmem + mi.field.unevictable + mi.field.swap_cached)) {
3014             other_file = (mi.field.nr_file_pages - mi.field.shmem -
3015                           mi.field.unevictable - mi.field.swap_cached);
3016         } else {
3017             other_file = 0;
3018         }
3019 
3020         min_score_adj = OOM_SCORE_ADJ_MAX + 1;
3021         for (i = 0; i < lowmem_targets_size; i++) {
3022             minfree = lowmem_minfree[i];
3023             if (other_free < minfree && other_file < minfree) {
3024                 min_score_adj = lowmem_adj[i];
3025                 break;
3026             }
3027         }
3028 
3029         if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
3030             if (debug_process_killing && lowmem_targets_size) {
3031                 ALOGI("Ignore %s memory pressure event "
3032                       "(free memory=%ldkB, cache=%ldkB, limit=%ldkB)",
3033                       level_name[level], other_free * page_k, other_file * page_k,
3034                       (long)lowmem_minfree[lowmem_targets_size - 1] * page_k);
3035             }
3036             return;
3037         }
3038 
3039         goto do_kill;
3040     }
3041 
3042     if (level == VMPRESS_LEVEL_LOW) {
3043         record_low_pressure_levels(&mi);
3044     }
3045 
3046     if (level_oomadj[level] > OOM_SCORE_ADJ_MAX) {
3047         /* Do not monitor this pressure level */
3048         return;
3049     }
3050 
3051     if ((mem_usage = get_memory_usage(&mem_usage_file_data)) < 0) {
3052         goto do_kill;
3053     }
3054     if ((memsw_usage = get_memory_usage(&memsw_usage_file_data)) < 0) {
3055         goto do_kill;
3056     }
3057 
3058     // Calculate memory usage as a percentage of memory+swap usage (swappiness signal).
3059     mem_pressure = (mem_usage * 100) / memsw_usage;
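    // Worked example (assumed values): mem_usage = 300 MB and memsw_usage = 400 MB
    // give mem_pressure = 75, i.e. 25% of the memcg charge resides in swap.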

    if (enable_pressure_upgrade && level != VMPRESS_LEVEL_CRITICAL) {
        // We are swapping too much.
        if (mem_pressure < upgrade_pressure) {
            level = upgrade_level(level);
            if (debug_process_killing) {
                ALOGI("Event upgraded to %s", level_name[level]);
            }
        }
    }

    // If we still have enough swap space available, check if we want to
    // ignore/downgrade pressure events.
    if (get_free_swap(&mi) >=
        mi.field.total_swap * swap_free_low_percentage / 100) {
        // If the pressure is larger than downgrade_pressure lmk will not
        // kill any process, since enough memory is available.
        if (mem_pressure > downgrade_pressure) {
            if (debug_process_killing) {
                ALOGI("Ignore %s memory pressure", level_name[level]);
            }
            return;
        } else if (level == VMPRESS_LEVEL_CRITICAL && mem_pressure > upgrade_pressure) {
            if (debug_process_killing) {
                ALOGI("Downgrade critical memory pressure");
            }
            // Downgrade event, since enough memory is available.
            level = downgrade_level(level);
        }
    }

do_kill:
    if (low_ram_device) {
        /* For Go devices kill only one task */
        if (find_and_kill_process(use_minfree_levels ? min_score_adj : level_oomadj[level],
                                  NULL, &mi, &wi, &curr_tm, NULL) == 0) {
            if (debug_process_killing) {
                ALOGI("Nothing to kill");
            }
        }
    } else {
        int pages_freed;
        static struct timespec last_report_tm;
        static unsigned long report_skip_count = 0;

        if (!use_minfree_levels) {
            /* Free up enough memory to downgrade the memory pressure to low level */
            if (mi.field.nr_free_pages >= low_pressure_mem.max_nr_free_pages) {
                if (debug_process_killing) {
                    ALOGI("Ignoring pressure since more memory is "
                        "available (%" PRId64 ") than watermark (%" PRId64 ")",
                        mi.field.nr_free_pages, low_pressure_mem.max_nr_free_pages);
                }
                return;
            }
            min_score_adj = level_oomadj[level];
        }

        pages_freed = find_and_kill_process(min_score_adj, NULL, &mi, &wi, &curr_tm, NULL);

        if (pages_freed == 0) {
            /* Rate limit kill reports when nothing was reclaimed */
            if (get_time_diff_ms(&last_report_tm, &curr_tm) < FAIL_REPORT_RLIMIT_MS) {
                report_skip_count++;
                return;
            }
        }

        /* Log whenever we kill or when report rate limit allows */
        if (use_minfree_levels) {
            ALOGI("Reclaimed %ldkB, cache(%ldkB) and free(%" PRId64 "kB)-reserved(%" PRId64 "kB) "
                "below min(%ldkB) for oom_score_adj %d",
                pages_freed * page_k,
                other_file * page_k, mi.field.nr_free_pages * page_k,
                zi.totalreserve_pages * page_k,
                minfree * page_k, min_score_adj);
        } else {
            ALOGI("Reclaimed %ldkB at oom_score_adj %d", pages_freed * page_k, min_score_adj);
        }

        if (report_skip_count > 0) {
            ALOGI("Suppressed %lu failed kill reports", report_skip_count);
            report_skip_count = 0;
        }

        last_report_tm = curr_tm;
    }
    if (is_waiting_for_kill()) {
        /* pause polling if we are waiting for process death notification */
        poll_params->update = POLLING_PAUSE;
    }
}

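/*
 * PSI registration sketch: init_psi_monitor() (from the psi library included
 * via <psi/psi.h>) is expected to open the kernel memory pressure interface
 * and arm a trigger of the form "<some|full> <stall threshold in us>
 * <window in us>", matching the kernel PSI trigger ABI. The resulting fd is
 * then added to our epoll set so that a stall exceeding the threshold within
 * the window wakes the main loop.
 */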
static bool init_mp_psi(enum vmpressure_level level, bool use_new_strategy) {
    int fd;

    /* Do not register a handler if threshold_ms is not set */
    if (!psi_thresholds[level].threshold_ms) {
        return true;
    }

    fd = init_psi_monitor(psi_thresholds[level].stall_type,
        psi_thresholds[level].threshold_ms * US_PER_MS,
        PSI_WINDOW_SIZE_MS * US_PER_MS);

    if (fd < 0) {
        return false;
    }

    vmpressure_hinfo[level].handler = use_new_strategy ? mp_event_psi : mp_event_common;
    vmpressure_hinfo[level].data = level;
    if (register_psi_monitor(epollfd, fd, &vmpressure_hinfo[level]) < 0) {
        destroy_psi_monitor(fd);
        return false;
    }
    maxevents++;
    mpevfd[level] = fd;

    return true;
}

static void destroy_mp_psi(enum vmpressure_level level) {
    int fd = mpevfd[level];

    if (fd < 0) {
        return;
    }

    if (unregister_psi_monitor(epollfd, fd) < 0) {
        ALOGE("Failed to unregister psi monitor for %s memory pressure; errno=%d",
            level_name[level], errno);
    }
    maxevents--;
    destroy_psi_monitor(fd);
    mpevfd[level] = -1;
}

enum class MemcgVersion {
    kNotFound,
    kV1,
    kV2,
};

static MemcgVersion __memcg_version() {
    std::string cgroupv2_path, memcg_path;

    if (!CgroupGetControllerPath("memory", &memcg_path)) {
        return MemcgVersion::kNotFound;
    }
    return CgroupGetControllerPath(CGROUPV2_CONTROLLER_NAME, &cgroupv2_path) &&
                           cgroupv2_path == memcg_path
                   ? MemcgVersion::kV2
                   : MemcgVersion::kV1;
}
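
/*
 * Version detection note: on a unified (v2) hierarchy the "memory" controller
 * lives at the cgroup v2 mount point, so its path matches the v2 controller
 * path; on a hybrid setup memory is a separate v1 mount and the paths differ.
 * That equality check is what distinguishes kV2 from kV1 above.
 */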

static MemcgVersion memcg_version() {
    static MemcgVersion version = __memcg_version();

    return version;
}

static bool init_psi_monitors() {
    /*
     * When PSI is used on low-ram devices or on high-end devices without minfree levels,
     * use the new kill strategy based on zone watermarks, free swap and thrashing stats.
     * Also use the new strategy if memcg has not been mounted in the v1 cgroups hierarchy,
     * since the old strategy relies on memcg attributes that are available only in the
     * v1 cgroups hierarchy.
     */
    bool use_new_strategy =
        GET_LMK_PROPERTY(bool, "use_new_strategy", low_ram_device || !use_minfree_levels);
    if (!use_new_strategy && memcg_version() != MemcgVersion::kV1) {
        ALOGE("Old kill strategy can only be used with v1 cgroup hierarchy");
        return false;
    }
    /* In default PSI mode override stall amounts using system properties */
    if (use_new_strategy) {
        /* Do not use low pressure level */
        psi_thresholds[VMPRESS_LEVEL_LOW].threshold_ms = 0;
        psi_thresholds[VMPRESS_LEVEL_MEDIUM].threshold_ms = psi_partial_stall_ms;
        psi_thresholds[VMPRESS_LEVEL_CRITICAL].threshold_ms = psi_complete_stall_ms;
    }

    if (!init_mp_psi(VMPRESS_LEVEL_LOW, use_new_strategy)) {
        return false;
    }
    if (!init_mp_psi(VMPRESS_LEVEL_MEDIUM, use_new_strategy)) {
        destroy_mp_psi(VMPRESS_LEVEL_LOW);
        return false;
    }
    if (!init_mp_psi(VMPRESS_LEVEL_CRITICAL, use_new_strategy)) {
        destroy_mp_psi(VMPRESS_LEVEL_MEDIUM);
        destroy_mp_psi(VMPRESS_LEVEL_LOW);
        return false;
    }
    return true;
}

static bool init_mp_common(enum vmpressure_level level) {
    // The implementation of this function relies on memcg statistics that are only available in the
    // v1 cgroup hierarchy.
    if (memcg_version() != MemcgVersion::kV1) {
        ALOGE("%s: global monitoring is only available for the v1 cgroup hierarchy", __func__);
        return false;
    }

    int mpfd;
    int evfd;
    int evctlfd;
    char buf[256];
    struct epoll_event epev;
    int ret;
    int level_idx = (int)level;
    const char *levelstr = level_name[level_idx];

    /* gid containing AID_SYSTEM required */
    mpfd = open(GetCgroupAttributePath("MemPressureLevel").c_str(), O_RDONLY | O_CLOEXEC);
    if (mpfd < 0) {
        ALOGI("No kernel memory.pressure_level support (errno=%d)", errno);
        goto err_open_mpfd;
    }

    evctlfd = open(GetCgroupAttributePath("MemCgroupEventControl").c_str(), O_WRONLY | O_CLOEXEC);
    if (evctlfd < 0) {
        ALOGI("No kernel memory cgroup event control (errno=%d)", errno);
        goto err_open_evctlfd;
    }

    evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    if (evfd < 0) {
        ALOGE("eventfd failed for level %s; errno=%d", levelstr, errno);
        goto err_eventfd;
    }

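    /*
     * Register evfd with the v1 memcg notification API: per the kernel's
     * cgroup-v1 memory documentation, a line of the form
     * "<event_fd> <pressure_level_fd> <level>" (e.g. "35 34 critical")
     * written to cgroup.event_control makes the kernel signal event_fd
     * whenever that pressure level is reached.
     */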
    ret = snprintf(buf, sizeof(buf), "%d %d %s", evfd, mpfd, levelstr);
    if (ret >= (ssize_t)sizeof(buf)) {
        ALOGE("cgroup.event_control line overflow for level %s", levelstr);
        goto err;
    }

    ret = TEMP_FAILURE_RETRY(write(evctlfd, buf, strlen(buf) + 1));
    if (ret == -1) {
        ALOGE("cgroup.event_control write failed for level %s; errno=%d",
              levelstr, errno);
        goto err;
    }

    epev.events = EPOLLIN;
    /* use data to store event level */
    vmpressure_hinfo[level_idx].data = level_idx;
    vmpressure_hinfo[level_idx].handler = mp_event_common;
    epev.data.ptr = (void *)&vmpressure_hinfo[level_idx];
    ret = epoll_ctl(epollfd, EPOLL_CTL_ADD, evfd, &epev);
    if (ret == -1) {
        ALOGE("epoll_ctl for level %s failed; errno=%d", levelstr, errno);
        goto err;
    }
    maxevents++;
    mpevfd[level] = evfd;
    close(evctlfd);
    return true;

err:
    close(evfd);
err_eventfd:
    close(evctlfd);
err_open_evctlfd:
    close(mpfd);
err_open_mpfd:
    return false;
}

static void destroy_mp_common(enum vmpressure_level level) {
    struct epoll_event epev;
    int fd = mpevfd[level];

    if (fd < 0) {
        return;
    }

    if (epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &epev)) {
        // Log an error and keep going
        ALOGE("epoll_ctl for level %s failed; errno=%d", level_name[level], errno);
    }
    maxevents--;
    close(fd);
    mpevfd[level] = -1;
}

static void kernel_event_handler(int data __unused, uint32_t events __unused,
                                 struct polling_params *poll_params __unused) {
    poll_kernel(kpoll_fd);
}

static bool init_monitors() {
    /* Try to use psi monitor first if kernel has it */
    use_psi_monitors = GET_LMK_PROPERTY(bool, "use_psi", true) &&
        init_psi_monitors();
    /* Fall back to vmpressure */
    if (!use_psi_monitors &&
        (!init_mp_common(VMPRESS_LEVEL_LOW) ||
        !init_mp_common(VMPRESS_LEVEL_MEDIUM) ||
        !init_mp_common(VMPRESS_LEVEL_CRITICAL))) {
        ALOGE("Kernel does not support memory pressure events or in-kernel low memory killer");
        return false;
    }
    if (use_psi_monitors) {
        ALOGI("Using psi monitors for memory pressure detection");
    } else {
        ALOGI("Using vmpressure for memory pressure detection");
    }
    return true;
}

static void destroy_monitors() {
    if (use_psi_monitors) {
        destroy_mp_psi(VMPRESS_LEVEL_CRITICAL);
        destroy_mp_psi(VMPRESS_LEVEL_MEDIUM);
        destroy_mp_psi(VMPRESS_LEVEL_LOW);
    } else {
        destroy_mp_common(VMPRESS_LEVEL_CRITICAL);
        destroy_mp_common(VMPRESS_LEVEL_MEDIUM);
        destroy_mp_common(VMPRESS_LEVEL_LOW);
    }
}

static void drop_reaper_comm() {
    close(reaper_comm_fd[0]);
    close(reaper_comm_fd[1]);
}

static bool setup_reaper_comm() {
    if (pipe(reaper_comm_fd)) {
        ALOGE("pipe failed: %s", strerror(errno));
        return false;
    }

    // Ensure the main thread never blocks on read
    int flags = fcntl(reaper_comm_fd[0], F_GETFL);
    if (fcntl(reaper_comm_fd[0], F_SETFL, flags | O_NONBLOCK)) {
        ALOGE("fcntl failed: %s", strerror(errno));
        drop_reaper_comm();
        return false;
    }

    return true;
}

static bool init_reaper() {
    if (!reaper.is_reaping_supported()) {
        ALOGI("Process reaping is not supported");
        return false;
    }

    if (!setup_reaper_comm()) {
        ALOGE("Failed to create thread communication channel");
        return false;
    }

    // Set up the epoll handler
    struct epoll_event epev;
    static struct event_handler_info kill_failed_hinfo = { 0, kill_fail_handler };
    epev.events = EPOLLIN;
    epev.data.ptr = (void *)&kill_failed_hinfo;
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, reaper_comm_fd[0], &epev)) {
        ALOGE("epoll_ctl failed: %s", strerror(errno));
        drop_reaper_comm();
        return false;
    }

    if (!reaper.init(reaper_comm_fd[1])) {
        ALOGE("Failed to initialize reaper object");
        if (epoll_ctl(epollfd, EPOLL_CTL_DEL, reaper_comm_fd[0], &epev)) {
            ALOGE("epoll_ctl failed: %s", strerror(errno));
        }
        drop_reaper_comm();
        return false;
    }
    maxevents++;

    return true;
}
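
/*
 * Reaper plumbing sketch: the reaper worker threads hold the write end of the
 * pipe (reaper_comm_fd[1]); when a kill cannot be completed they are expected
 * to write a notification there, which wakes the main epoll loop through the
 * read end registered above and dispatches kill_fail_handler.
 */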

static int init(void) {
    static struct event_handler_info kernel_poll_hinfo = { 0, kernel_event_handler };
    struct reread_data file_data = {
        .filename = ZONEINFO_PATH,
        .fd = -1,
    };
    struct epoll_event epev;
    int pidfd;
    int i;
    int ret;

    /* page_k ends up as kilobytes per page */
    page_k = sysconf(_SC_PAGESIZE);
    if (page_k == -1)
        page_k = PAGE_SIZE;
    page_k /= 1024;

    epollfd = epoll_create(MAX_EPOLL_EVENTS);
    if (epollfd == -1) {
        ALOGE("epoll_create failed (errno=%d)", errno);
        return -1;
    }

    // mark data connections as not connected
    for (int i = 0; i < MAX_DATA_CONN; i++) {
        data_sock[i].sock = -1;
    }

    ctrl_sock.sock = android_get_control_socket("lmkd");
    if (ctrl_sock.sock < 0) {
        ALOGE("get lmkd control socket failed");
        return -1;
    }

    ret = listen(ctrl_sock.sock, MAX_DATA_CONN);
    if (ret < 0) {
        ALOGE("lmkd control socket listen failed (errno=%d)", errno);
        return -1;
    }

    epev.events = EPOLLIN;
    ctrl_sock.handler_info.handler = ctrl_connect_handler;
    epev.data.ptr = (void *)&(ctrl_sock.handler_info);
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, ctrl_sock.sock, &epev) == -1) {
        ALOGE("epoll_ctl for lmkd control socket failed (errno=%d)", errno);
        return -1;
    }
    maxevents++;

    has_inkernel_module = !access(INKERNEL_MINFREE_PATH, W_OK);
    use_inkernel_interface = has_inkernel_module;

    if (use_inkernel_interface) {
        ALOGI("Using in-kernel low memory killer interface");
        if (init_poll_kernel()) {
            epev.events = EPOLLIN;
            epev.data.ptr = (void*)&kernel_poll_hinfo;
            if (epoll_ctl(epollfd, EPOLL_CTL_ADD, kpoll_fd, &epev) != 0) {
                ALOGE("epoll_ctl for lmk events failed (errno=%d)", errno);
                close(kpoll_fd);
                kpoll_fd = -1;
            } else {
                maxevents++;
                /* let others know that lmkd supports kill reporting */
                property_set("sys.lmk.reportkills", "1");
            }
        }
    } else {
        if (!init_monitors()) {
            return -1;
        }
        /* let others know that lmkd supports kill reporting */
        property_set("sys.lmk.reportkills", "1");
    }

    for (i = 0; i <= ADJTOSLOT(OOM_SCORE_ADJ_MAX); i++) {
        procadjslot_list[i].next = &procadjslot_list[i];
        procadjslot_list[i].prev = &procadjslot_list[i];
    }

    memset(killcnt_idx, KILLCNT_INVALID_IDX, sizeof(killcnt_idx));

    /*
     * Read zoneinfo as the biggest file we read to create and size the initial
     * read buffer and avoid memory re-allocations during memory pressure
     */
    if (reread_file(&file_data) == NULL) {
        ALOGE("Failed to read %s: %s", file_data.filename, strerror(errno));
    }

    /* check if kernel supports pidfd_open syscall */
    pidfd = TEMP_FAILURE_RETRY(pidfd_open(getpid(), 0));
    if (pidfd < 0) {
        pidfd_supported = (errno != ENOSYS);
    } else {
        pidfd_supported = true;
        close(pidfd);
    }
    ALOGI("Process polling is %s", pidfd_supported ? "supported" : "not supported");

    if (!lmkd_init_hook()) {
        ALOGE("Failed to initialize LMKD hooks.");
        return -1;
    }

    return 0;
}

static bool polling_paused(struct polling_params *poll_params) {
    return poll_params->paused_handler != NULL;
}

static void resume_polling(struct polling_params *poll_params, struct timespec curr_tm) {
    poll_params->poll_start_tm = curr_tm;
    poll_params->poll_handler = poll_params->paused_handler;
    poll_params->polling_interval_ms = PSI_POLL_PERIOD_SHORT_MS;
    poll_params->paused_handler = NULL;
}

static void call_handler(struct event_handler_info* handler_info,
                         struct polling_params *poll_params, uint32_t events) {
    struct timespec curr_tm;

    watchdog.start();
    poll_params->update = POLLING_DO_NOT_CHANGE;
    handler_info->handler(handler_info->data, events, poll_params);
    clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
    if (poll_params->poll_handler == handler_info) {
        poll_params->last_poll_tm = curr_tm;
    }

    switch (poll_params->update) {
    case POLLING_START:
        /*
         * Poll for the duration of PSI_WINDOW_SIZE_MS after the
         * initial PSI event because psi events are rate-limited
         * at one per sec.
         */
        poll_params->poll_start_tm = curr_tm;
        poll_params->poll_handler = handler_info;
        break;
    case POLLING_PAUSE:
        poll_params->paused_handler = handler_info;
        poll_params->poll_handler = NULL;
        break;
    case POLLING_RESUME:
        resume_polling(poll_params, curr_tm);
        break;
    case POLLING_DO_NOT_CHANGE:
        if (poll_params->poll_handler &&
            get_time_diff_ms(&poll_params->poll_start_tm, &curr_tm) > PSI_WINDOW_SIZE_MS) {
            /* Polled for the duration of PSI window, time to stop */
            poll_params->poll_handler = NULL;
        }
        break;
    }
    watchdog.stop();
}
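
/*
 * Polling state machine summary: a handler sets poll_params->update before
 * returning. POLLING_START begins periodic re-invocation of that handler for
 * up to PSI_WINDOW_SIZE_MS; POLLING_PAUSE parks it while we wait for a kill
 * to complete; POLLING_RESUME restarts it at the short polling interval; and
 * POLLING_DO_NOT_CHANGE leaves the mode alone, expiring the poll once the
 * PSI window has elapsed.
 */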

static void mainloop(void) {
    struct event_handler_info* handler_info;
    struct polling_params poll_params;
    struct timespec curr_tm;
    struct epoll_event *evt;
    long delay = -1;

    poll_params.poll_handler = NULL;
    poll_params.paused_handler = NULL;

    while (1) {
        struct epoll_event events[MAX_EPOLL_EVENTS];
        int nevents;
        int i;

        if (poll_params.poll_handler) {
            bool poll_now;

            clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
            if (poll_params.update == POLLING_RESUME) {
                /* Just transitioned into POLLING_RESUME, poll immediately. */
                poll_now = true;
                nevents = 0;
            } else {
                /* Calculate next timeout */
                delay = get_time_diff_ms(&poll_params.last_poll_tm, &curr_tm);
                delay = (delay < poll_params.polling_interval_ms) ?
                    poll_params.polling_interval_ms - delay : poll_params.polling_interval_ms;

                /* Wait for events until the next polling timeout */
                nevents = epoll_wait(epollfd, events, maxevents, delay);

                /* Update current time after wait */
                clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
                poll_now = (get_time_diff_ms(&poll_params.last_poll_tm, &curr_tm) >=
                    poll_params.polling_interval_ms);
            }
            if (poll_now) {
                call_handler(poll_params.poll_handler, &poll_params, 0);
            }
        } else {
            if (kill_timeout_ms && is_waiting_for_kill()) {
                clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
                delay = kill_timeout_ms - get_time_diff_ms(&last_kill_tm, &curr_tm);
                /* Wait for pidfd notification or for the kill timeout to expire */
                nevents = (delay > 0) ? epoll_wait(epollfd, events, maxevents, delay) : 0;
                if (nevents == 0) {
                    /* Kill notification timed out */
                    stop_wait_for_proc_kill(false);
                    if (polling_paused(&poll_params)) {
                        clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_tm);
                        poll_params.update = POLLING_RESUME;
                        resume_polling(&poll_params, curr_tm);
                    }
                }
            } else {
                /* Wait for events with no timeout */
                nevents = epoll_wait(epollfd, events, maxevents, -1);
            }
        }

        if (nevents == -1) {
            if (errno == EINTR)
                continue;
            ALOGE("epoll_wait failed (errno=%d)", errno);
            continue;
        }

        /*
         * First pass to see if any data socket connections were dropped.
         * Dropped connections should be handled before any other events
         * to deallocate the data connection and correctly handle cases when
         * a connection gets dropped and reestablished in the same epoll cycle.
         * In such cases it's essential to handle connection closures first.
         */
        for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
            if ((evt->events & EPOLLHUP) && evt->data.ptr) {
                ALOGI("lmkd data connection dropped");
                handler_info = (struct event_handler_info*)evt->data.ptr;
                watchdog.start();
                ctrl_data_close(handler_info->data);
                watchdog.stop();
            }
        }

        /* Second pass to handle all other events */
        for (i = 0, evt = &events[0]; i < nevents; ++i, evt++) {
            if (evt->events & EPOLLERR) {
                ALOGD("EPOLLERR on event #%d", i);
            }
            if (evt->events & EPOLLHUP) {
                /* This case was handled in the first pass */
                continue;
            }
            if (evt->data.ptr) {
                handler_info = (struct event_handler_info*)evt->data.ptr;
                call_handler(handler_info, &poll_params, evt->events);
            }
        }
    }
}

int issue_reinit() {
    int sock;

    sock = lmkd_connect();
    if (sock < 0) {
        ALOGE("failed to connect to lmkd: %s", strerror(errno));
        return -1;
    }

    enum update_props_result res = lmkd_update_props(sock);
    switch (res) {
    case UPDATE_PROPS_SUCCESS:
        ALOGI("lmkd updated properties successfully");
        break;
    case UPDATE_PROPS_SEND_ERR:
        ALOGE("failed to send lmkd request: %s", strerror(errno));
        break;
    case UPDATE_PROPS_RECV_ERR:
        ALOGE("failed to receive lmkd reply: %s", strerror(errno));
        break;
    case UPDATE_PROPS_FORMAT_ERR:
        ALOGE("lmkd reply is invalid");
        break;
    case UPDATE_PROPS_FAIL:
        ALOGE("lmkd failed to update its properties");
        break;
    }

    close(sock);
    return res == UPDATE_PROPS_SUCCESS ? 0 : -1;
}
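
/*
 * Usage note: issue_reinit() runs in a short-lived client instance of this
 * binary ("lmkd --reinit", see main() below). It connects to the running
 * daemon's control socket and asks it to re-read its properties, so tunables
 * can be updated without restarting lmkd.
 */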

static bool update_props() {
    /* By default disable low level vmpressure events */
    level_oomadj[VMPRESS_LEVEL_LOW] =
        GET_LMK_PROPERTY(int32, "low", OOM_SCORE_ADJ_MAX + 1);
    level_oomadj[VMPRESS_LEVEL_MEDIUM] =
        GET_LMK_PROPERTY(int32, "medium", 800);
    level_oomadj[VMPRESS_LEVEL_CRITICAL] =
        GET_LMK_PROPERTY(int32, "critical", 0);
    debug_process_killing = GET_LMK_PROPERTY(bool, "debug", false);

    /* By default disable upgrade/downgrade logic */
    enable_pressure_upgrade =
        GET_LMK_PROPERTY(bool, "critical_upgrade", false);
    upgrade_pressure =
        (int64_t)GET_LMK_PROPERTY(int32, "upgrade_pressure", 100);
    downgrade_pressure =
        (int64_t)GET_LMK_PROPERTY(int32, "downgrade_pressure", 100);
    kill_heaviest_task =
        GET_LMK_PROPERTY(bool, "kill_heaviest_task", false);
    low_ram_device = property_get_bool("ro.config.low_ram", false);
    kill_timeout_ms =
        (unsigned long)GET_LMK_PROPERTY(int32, "kill_timeout_ms", 100);
    use_minfree_levels =
        GET_LMK_PROPERTY(bool, "use_minfree_levels", false);
    per_app_memcg =
        property_get_bool("ro.config.per_app_memcg", low_ram_device);
    swap_free_low_percentage = clamp(0, 100, GET_LMK_PROPERTY(int32, "swap_free_low_percentage",
        DEF_LOW_SWAP));
    psi_partial_stall_ms = GET_LMK_PROPERTY(int32, "psi_partial_stall_ms",
        low_ram_device ? DEF_PARTIAL_STALL_LOWRAM : DEF_PARTIAL_STALL);
    psi_complete_stall_ms = GET_LMK_PROPERTY(int32, "psi_complete_stall_ms",
        DEF_COMPLETE_STALL);
    thrashing_limit_pct =
            std::max(0, GET_LMK_PROPERTY(int32, "thrashing_limit",
                                         low_ram_device ? DEF_THRASHING_LOWRAM : DEF_THRASHING));
    thrashing_limit_decay_pct = clamp(0, 100, GET_LMK_PROPERTY(int32, "thrashing_limit_decay",
        low_ram_device ? DEF_THRASHING_DECAY_LOWRAM : DEF_THRASHING_DECAY));
    thrashing_critical_pct = std::max(
            0, GET_LMK_PROPERTY(int32, "thrashing_limit_critical", thrashing_limit_pct * 2));
    swap_util_max = clamp(0, 100, GET_LMK_PROPERTY(int32, "swap_util_max", 100));
    filecache_min_kb = GET_LMK_PROPERTY(int64, "filecache_min_kb", 0);
    stall_limit_critical = GET_LMK_PROPERTY(int64, "stall_limit_critical", 100);

    reaper.enable_debug(debug_process_killing);

    /* Call the update props hook */
    if (!lmkd_update_props_hook()) {
        ALOGE("Failed to update LMKD hook props.");
        return false;
    }

    return true;
}
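
/*
 * Tuning sketch (an assumption: GET_LMK_PROPERTY reads the ro.lmk.* property
 * namespace, as described in the lmkd documentation). On a debuggable build
 * one could, for example, adjust the kill timeout with
 *     adb shell setprop ro.lmk.kill_timeout_ms 150
 *     adb shell lmkd --reinit
 * so that the running daemon re-executes update_props() via the reinit path
 * above.
 */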

int main(int argc, char **argv) {
    if ((argc > 1) && argv[1] && !strcmp(argv[1], "--reinit")) {
        if (property_set(LMKD_REINIT_PROP, "")) {
            ALOGE("Failed to reset " LMKD_REINIT_PROP " property");
        }
        return issue_reinit();
    }

    if (!update_props()) {
        ALOGE("Failed to initialize props, exiting.");
        return -1;
    }

    ctx = create_android_logger(KILLINFO_LOG_TAG);

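    /* Note: init() returns 0 on success, so this branch is the success path. */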
    if (!init()) {
        if (!use_inkernel_interface) {
            /*
             * MCL_ONFAULT pins pages as they fault instead of loading
             * everything immediately all at once. (Which would be bad,
             * because as of this writing, we have a lot of mapped pages we
             * never use.) Old kernels will see MCL_ONFAULT and fail with
             * EINVAL; we ignore this failure.
             *
             * N.B. read the man page for mlockall. MCL_CURRENT | MCL_ONFAULT
             * pins ⊆ MCL_CURRENT, converging to just MCL_CURRENT as we fault
             * in pages.
             */
            /* CAP_IPC_LOCK required */
            if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) && (errno != EINVAL)) {
                ALOGW("mlockall failed %s", strerror(errno));
            }

            /* CAP_NICE required */
            struct sched_param param = {
                    .sched_priority = 1,
            };
            if (sched_setscheduler(0, SCHED_FIFO, &param)) {
                ALOGW("set SCHED_FIFO failed %s", strerror(errno));
            }
        }

        if (init_reaper()) {
            ALOGI("Process reaper initialized with %d threads in the pool",
                reaper.thread_cnt());
        }

        if (!watchdog.init()) {
            ALOGE("Failed to initialize the watchdog");
        }

        mainloop();
    }

    android_log_destroy(&ctx);

    ALOGI("exiting");
    return 0;
}