#include <binder/Binder.h>
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>

#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>

#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
19
20 using namespace std;
21 using namespace android;
22
// Transaction codes understood by BinderWorkerService (see onTransact below).
enum BinderWorkerServiceCode {
  BINDER_NOP = IBinder::FIRST_CALL_TRANSACTION,
};
26
// Always-on assert: prints the failing function/line/condition to stderr and
// terminates the process. Wrapped in do/while(0) so it acts as a single
// statement after an unbraced `if`.
#define ASSERT(cond)                                                  \
  do {                                                                \
    if (!(cond)) {                                                    \
      cerr << __func__ << ":" << __LINE__ << " condition:" << #cond   \
           << " failed\n"                                             \
           << endl;                                                   \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)
36
// Proxies to the server-side BinderWorkerService of every pair; populated in
// each worker process after the kick-off handshake (see worker_fx).
vector<sp<IBinder> > workers;

// the ratio that the service is synced on the same cpu beyond
// GOOD_SYNC_MIN is considered as good
#define GOOD_SYNC_MIN (0.6)

// Significant digits used when dumping latency results.
// ("PRESICION" is a historical typo, kept to avoid churn.)
#define DUMP_PRESICION 2

// Root of the kernel tracefs mount used by the -trace option.
string trace_path = "/sys/kernel/debug/tracing";

// the default value
int no_process = 2;    // total worker processes (2 per client/server pair)
int iterations = 100;  // transaction iterations issued by each client
int payload_size = 16; // bytes of parcel payload per transaction
int no_inherent = 0;   // transactions observed without priority inheritance
int no_sync = 0;       // transactions served on a different cpu than caller
int verbose = 0;       // -v: dump per-thread scheduling info
int trace;             // -trace: freeze ftrace when a deadline is missed
55
traceIsOn()56 bool traceIsOn() {
57 fstream file;
58 file.open(trace_path + "/tracing_on", ios::in);
59 char on;
60 file >> on;
61 file.close();
62 return on == '1';
63 }
64
traceStop()65 void traceStop() {
66 ofstream file;
67 file.open(trace_path + "/tracing_on", ios::out | ios::trunc);
68 file << '0' << endl;
69 file.close();
70 }
71
// the deadline latency that we are interested in
// A single BINDER_NOP round-trip longer than this (microseconds, default
// 2.5ms, overridable with -deadline_us) counts as a miss; with -trace it
// also halts the run and freezes the kernel trace (see Results::add_time).
uint64_t deadline_us = 2500;
74
thread_pri()75 int thread_pri() {
76 struct sched_param param;
77 int policy;
78 ASSERT(!pthread_getschedparam(pthread_self(), &policy, ¶m));
79 return param.sched_priority;
80 }
81
thread_dump(const char * prefix)82 void thread_dump(const char* prefix) {
83 struct sched_param param;
84 int policy;
85 if (!verbose) return;
86 cout << "--------------------------------------------------" << endl;
87 cout << setw(12) << left << prefix << " pid: " << getpid()
88 << " tid: " << gettid() << " cpu: " << sched_getcpu() << endl;
89 ASSERT(!pthread_getschedparam(pthread_self(), &policy, ¶m));
90 string s = (policy == SCHED_OTHER)
91 ? "SCHED_OTHER"
92 : (policy == SCHED_FIFO)
93 ? "SCHED_FIFO"
94 : (policy == SCHED_RR) ? "SCHED_RR" : "???";
95 cout << setw(12) << left << s << param.sched_priority << endl;
96 return;
97 }
98
// In-process binder service published by every server worker. It answers a
// single transaction code (BINDER_NOP) that reports back whether the
// caller's scheduling priority and cpu were inherited by the binder thread
// serving the call.
class BinderWorkerService : public BBinder {
 public:
  BinderWorkerService() {
  }
  ~BinderWorkerService() {
  }
  virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
                              uint32_t flags = 0) {
    (void)flags;
    (void)data;
    (void)reply;
    switch (code) {
      // The transaction format is like
      //
      // data[in]: int32: caller priority
      // int32: caller cpu
      //
      // reply[out]: int32: 1 if caller's priority != callee's priority
      // int32: 1 if caller's cpu != callee's cpu
      //
      // note the caller cpu read here is not always correct
      // there're still chances that the caller got switched out
      // right after it read the cpu number and still before the transaction.
      case BINDER_NOP: {
        thread_dump("binder");
        int priority = thread_pri();
        int priority_caller = data.readInt32();
        int h = 0, s = 0;
        if (priority_caller != priority) {
          // Priority inheritance failed: the binder thread is not running
          // at the caller's priority.
          h++;
          if (verbose) {
            cout << "err priority_caller:" << priority_caller
                 << ", priority:" << priority << endl;
          }
        }
        // Only score cpu-sync for max-priority SCHED_FIFO callers (the
        // dedicated fifo test thread); those are expected to be served on
        // the same cpu.
        if (priority == sched_get_priority_max(SCHED_FIFO)) {
          int cpu = sched_getcpu();
          int cpu_caller = data.readInt32();
          if (cpu != cpu_caller) {
            s++;
          }
        }
        reply->writeInt32(h);
        reply->writeInt32(s);
        return NO_ERROR;
      }
      default:
        return UNKNOWN_TRANSACTION;
    };
  }
};
150
151 class Pipe {
152 int m_readFd;
153 int m_writeFd;
Pipe(int readFd,int writeFd)154 Pipe(int readFd, int writeFd) : m_readFd{readFd}, m_writeFd{writeFd} {
155 }
156 Pipe(const Pipe&) = delete;
157 Pipe& operator=(const Pipe&) = delete;
158 Pipe& operator=(const Pipe&&) = delete;
159
160 public:
Pipe(Pipe && rval)161 Pipe(Pipe&& rval) noexcept {
162 m_readFd = rval.m_readFd;
163 m_writeFd = rval.m_writeFd;
164 rval.m_readFd = 0;
165 rval.m_writeFd = 0;
166 }
~Pipe()167 ~Pipe() {
168 if (m_readFd) close(m_readFd);
169 if (m_writeFd) close(m_writeFd);
170 }
signal()171 void signal() {
172 bool val = true;
173 int error = write(m_writeFd, &val, sizeof(val));
174 ASSERT(error >= 0);
175 };
wait()176 void wait() {
177 bool val = false;
178 int error = read(m_readFd, &val, sizeof(val));
179 ASSERT(error >= 0);
180 }
181 template <typename T>
send(const T & v)182 void send(const T& v) {
183 int error = write(m_writeFd, &v, sizeof(T));
184 ASSERT(error >= 0);
185 }
186 template <typename T>
recv(T & v)187 void recv(T& v) {
188 int error = read(m_readFd, &v, sizeof(T));
189 ASSERT(error >= 0);
190 }
createPipePair()191 static tuple<Pipe, Pipe> createPipePair() {
192 int a[2];
193 int b[2];
194
195 int error1 = pipe(a);
196 int error2 = pipe(b);
197 ASSERT(error1 >= 0);
198 ASSERT(error2 >= 0);
199
200 return make_tuple(Pipe(a[0], b[1]), Pipe(b[0], a[1]));
201 }
202 };
203
// High-resolution timestamp type used for latency measurement.
typedef std::chrono::time_point<std::chrono::high_resolution_clock> Tick;

// Returns the current timestamp.
static inline Tick tickNow() {
  return std::chrono::high_resolution_clock::now();
}

// Nanoseconds elapsed between |sta| and |end|.
// Fix: take const references — the original demanded mutable lvalues for a
// pure read-only computation, rejecting temporaries and const Ticks.
static inline uint64_t tickNano(const Tick& sta, const Tick& end) {
  return uint64_t(
      std::chrono::duration_cast<std::chrono::nanoseconds>(end - sta).count());
}
213
214 struct Results {
215 uint64_t m_best = 0xffffffffffffffffULL;
216 uint64_t m_worst = 0;
217 uint64_t m_transactions = 0;
218 uint64_t m_total_time = 0;
219 uint64_t m_miss = 0;
220 bool tracing;
ResultsResults221 Results(bool _tracing) : tracing(_tracing) {
222 }
miss_deadlineResults223 inline bool miss_deadline(uint64_t nano) {
224 return nano > deadline_us * 1000;
225 }
add_timeResults226 void add_time(uint64_t nano) {
227 m_best = min(nano, m_best);
228 m_worst = max(nano, m_worst);
229 m_transactions += 1;
230 m_total_time += nano;
231 if (miss_deadline(nano)) m_miss++;
232 if (miss_deadline(nano) && tracing) {
233 // There might be multiple process pair running the test concurrently
234 // each may execute following statements and only the first one actually
235 // stop the trace and any traceStop() afterthen has no effect.
236 traceStop();
237 cout << endl;
238 cout << "deadline triggered: halt & stop trace" << endl;
239 cout << "log:" + trace_path + "/trace" << endl;
240 cout << endl;
241 exit(1);
242 }
243 }
dumpResults244 void dump() {
245 double best = (double)m_best / 1.0E6;
246 double worst = (double)m_worst / 1.0E6;
247 double average = (double)m_total_time / m_transactions / 1.0E6;
248 // FIXME: libjson?
249 int W = DUMP_PRESICION + 2;
250 cout << setprecision(DUMP_PRESICION) << "{ \"avg\":" << setw(W) << left
251 << average << ",\"wst\":" << setw(W) << left << worst
252 << ",\"bst\":" << setw(W) << left << best << ",\"miss\":" << left
253 << m_miss << ",\"meetR\":" << left << setprecision(DUMP_PRESICION + 3)
254 << (1.0 - (double)m_miss / m_transactions) << "}";
255 }
256 };
257
generateServiceName(int num)258 String16 generateServiceName(int num) {
259 char num_str[32];
260 snprintf(num_str, sizeof(num_str), "%d", num);
261 String16 serviceName = String16("binderWorker") + String16(num_str);
262 return serviceName;
263 }
264
// Fill |data| with a |sz|-byte payload: the first two int32 slots carry the
// caller's priority and cpu, the remainder is zero padding.
//
// The size accounting looks off-by-one but is correct: two int32 are written
// up front while sz is decremented only once, and the loop keeps writing
// while MORE than one int32 of budget remains — so exactly ceil(sz/4) int32
// end up in the parcel. |sz| must be at least 8 bytes (asserted).
static void parcel_fill(Parcel& data, int sz, int priority, int cpu) {
  ASSERT(sz >= (int)sizeof(uint32_t) * 2);
  data.writeInt32(priority);
  data.writeInt32(cpu);
  sz -= sizeof(uint32_t);
  while (sz > (int)sizeof(uint32_t)) {
    data.writeInt32(0);
    sz -= sizeof(uint32_t);
  }
}
275
// Arguments handed to the one-shot fifo transaction thread (thread_start).
typedef struct {
  void* result;  // Results* the thread adds its latency sample to
  int target;    // index into the global |workers| list to transact with
} thread_priv_t;
280
thread_start(void * p)281 static void* thread_start(void* p) {
282 thread_priv_t* priv = (thread_priv_t*)p;
283 int target = priv->target;
284 Results* results_fifo = (Results*)priv->result;
285 Parcel data, reply;
286 Tick sta, end;
287
288 parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
289 thread_dump("fifo-caller");
290
291 sta = tickNow();
292 status_t ret = workers[target]->transact(BINDER_NOP, data, &reply);
293 end = tickNow();
294 results_fifo->add_time(tickNano(sta, end));
295
296 no_inherent += reply.readInt32();
297 no_sync += reply.readInt32();
298 return 0;
299 }
300
301 // create a fifo thread to transact and wait it to finished
thread_transaction(int target,Results * results_fifo)302 static void thread_transaction(int target, Results* results_fifo) {
303 thread_priv_t thread_priv;
304 void* dummy;
305 pthread_t thread;
306 pthread_attr_t attr;
307 struct sched_param param;
308 thread_priv.target = target;
309 thread_priv.result = results_fifo;
310 ASSERT(!pthread_attr_init(&attr));
311 ASSERT(!pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
312 param.sched_priority = sched_get_priority_max(SCHED_FIFO);
313 ASSERT(!pthread_attr_setschedparam(&attr, ¶m));
314 ASSERT(!pthread_create(&thread, &attr, &thread_start, &thread_priv));
315 ASSERT(!pthread_join(thread, &dummy));
316 }
317
// Workers are numbered 0..no_process-1: the first half are servers, the
// second half are the clients paired with them.
#define is_client(_num) ((_num) >= (no_process / 2))
319
// Body of each forked worker process (both server and client roles).
// Handshake protocol with the parent over |p| (driven from main):
//   1. signal: service registered      2. wait: kick-off iterations
//   3. signal: iterations done         4. wait: ok to print results
//   5. send:   sync token              6. wait: ok to exit
// The process's exit status carries its no-inheritance count back to the
// parent.
// NOTE(review): the parameters shadow the identically-named globals; only
// the global payload_size is read by thread_start, so callers must keep
// them equal (main does).
void worker_fx(int num, int no_process, int iterations, int payload_size,
               Pipe p) {
  int dummy;
  Results results_other(false), results_fifo(trace);

  // Create BinderWorkerService and for go.
  ProcessState::self()->startThreadPool();
  sp<IServiceManager> serviceMgr = defaultServiceManager();
  sp<BinderWorkerService> service = new BinderWorkerService;
  serviceMgr->addService(generateServiceName(num), service);
  // init done
  p.signal();
  // wait for kick-off
  p.wait();

  // If client/server pairs, then half the workers are
  // servers and half are clients
  int server_count = no_process / 2;

  for (int i = 0; i < server_count; i++) {
    // self service is in-process so just skip
    if (num == i) continue;
    workers.push_back(serviceMgr->getService(generateServiceName(i)));
  }

  // Client for each pair iterates here
  // each iterations contains exatcly 2 transactions
  for (int i = 0; is_client(num) && i < iterations; i++) {
    Parcel data, reply;
    Tick sta, end;
    // the target is paired to make it easier to diagnose
    int target = num % server_count;

    // 1. transaction by fifo thread
    thread_transaction(target, &results_fifo);
    parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
    thread_dump("other-caller");

    // 2. transaction by other thread
    sta = tickNow();
    ASSERT(NO_ERROR == workers[target]->transact(BINDER_NOP, data, &reply));
    end = tickNow();
    results_other.add_time(tickNano(sta, end));

    no_inherent += reply.readInt32();
    no_sync += reply.readInt32();
  }
  // Signal completion to master and wait.
  p.signal();
  p.wait();

  // NOTE(review): this sends the *address* of dummy (sizeof(int*) bytes of
  // pointer value); the parent never reads it, so it only acts as a
  // write-side sync token. Probably meant p.send(dummy) — confirm before
  // changing, since nothing consumes it.
  p.send(&dummy);
  // wait for kill
  p.wait();
  // Client for each pair dump here
  if (is_client(num)) {
    int no_trans = iterations * 2;
    double sync_ratio = (1.0 - (double)no_sync / no_trans);
    // FIXME: libjson?
    cout << "\"P" << (num - server_count) << "\":{\"SYNC\":\""
         << ((sync_ratio > GOOD_SYNC_MIN) ? "GOOD" : "POOR") << "\","
         << "\"S\":" << (no_trans - no_sync) << ",\"I\":" << no_trans << ","
         << "\"R\":" << sync_ratio << "," << endl;

    cout << " \"other_ms\":";
    results_other.dump();
    cout << "," << endl;
    cout << " \"fifo_ms\": ";
    results_fifo.dump();
    cout << endl;
    cout << "}," << endl;
  }
  exit(no_inherent);
}
394
make_process(int num,int iterations,int no_process,int payload_size)395 Pipe make_process(int num, int iterations, int no_process, int payload_size) {
396 auto pipe_pair = Pipe::createPipePair();
397 pid_t pid = fork();
398 if (pid) {
399 // parent
400 return move(get<0>(pipe_pair));
401 } else {
402 // child
403 thread_dump(is_client(num) ? "client" : "server");
404 worker_fx(num, no_process, iterations, payload_size,
405 move(get<1>(pipe_pair)));
406 // never get here
407 return move(get<0>(pipe_pair));
408 }
409 }
410
wait_all(vector<Pipe> & v)411 void wait_all(vector<Pipe>& v) {
412 for (size_t i = 0; i < v.size(); i++) {
413 v[i].wait();
414 }
415 }
416
signal_all(vector<Pipe> & v)417 void signal_all(vector<Pipe>& v) {
418 for (size_t i = 0; i < v.size(); i++) {
419 v[i].signal();
420 }
421 }
422
423 // This test is modified from binderThroughputTest.cpp
main(int argc,char ** argv)424 int main(int argc, char** argv) {
425 for (int i = 1; i < argc; i++) {
426 if (string(argv[i]) == "-i") {
427 iterations = atoi(argv[i + 1]);
428 i++;
429 continue;
430 }
431 if (string(argv[i]) == "-pair") {
432 no_process = 2 * atoi(argv[i + 1]);
433 i++;
434 continue;
435 }
436 if (string(argv[i]) == "-deadline_us") {
437 deadline_us = atoi(argv[i + 1]);
438 i++;
439 continue;
440 }
441 if (string(argv[i]) == "-v") {
442 verbose = 1;
443 }
444 // The -trace argument is used like that:
445 //
446 // First start trace with atrace command as usual
447 // >atrace --async_start sched freq
448 //
449 // then use schd-dbg with -trace arguments
450 //./schd-dbg -trace -deadline_us 2500
451 //
452 // This makes schd-dbg to stop trace once it detects a transaction
453 // duration over the deadline. By writing '0' to
454 // /sys/kernel/debug/tracing and halt the process. The tracelog is
455 // then available on /sys/kernel/debug/trace
456 if (string(argv[i]) == "-trace") {
457 trace = 1;
458 }
459 }
460 if (trace && !traceIsOn()) {
461 cout << "trace is not running" << endl;
462 cout << "check " << trace_path + "/tracing_on" << endl;
463 cout << "use atrace --async_start first" << endl;
464 exit(-1);
465 }
466 vector<Pipe> pipes;
467 thread_dump("main");
468 // FIXME: libjson?
469 cout << "{" << endl;
470 cout << "\"cfg\":{\"pair\":" << (no_process / 2)
471 << ",\"iterations\":" << iterations << ",\"deadline_us\":" << deadline_us
472 << "}," << endl;
473
474 // the main process fork 2 processes for each pairs
475 // 1 server + 1 client
476 // each has a pipe to communicate with
477 for (int i = 0; i < no_process; i++) {
478 pipes.push_back(make_process(i, iterations, no_process, payload_size));
479 }
480 // wait for init done
481 wait_all(pipes);
482 // kick-off iterations
483 signal_all(pipes);
484 // wait for completion
485 wait_all(pipes);
486 // start to send result
487 signal_all(pipes);
488 for (int i = 0; i < no_process; i++) {
489 int status;
490 // kill
491 pipes[i].signal();
492 wait(&status);
493 // the exit status is number of transactions without priority inheritance
494 // detected in the child process
495 no_inherent += status;
496 }
497 // FIXME: libjson?
498 cout << "\"inheritance\": " << (no_inherent == 0 ? "\"PASS\"" : "\"FAIL\"")
499 << endl;
500 cout << "}" << endl;
501 return -no_inherent;
502 }
503