1 /*
2  * Copyright (c) 2021, Google Inc. All rights reserved
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #define LOCAL_TRACE (0)
25 
26 #include <kernel/debug.h>
27 #include <kernel/event.h>
28 #include <kernel/mutex.h>
29 #include <kernel/thread.h>
30 #include <lib/unittest/unittest.h>
31 #include <lk/init.h>
32 #include <lk/trace.h>
33 #include <stdbool.h>
34 #include <stdio.h>
35 
36 #define PINCPU_TEST_CPU_COUNT 4
37 
38 /*
39  * Test verifying cpu pinning on any thread state
40  * Make sure cpu pinning can be dynamically set on
41  * a running, ready, blocked or sleeping thread.
42  */
43 
44 /**
45  * struct pincputest_thread_ctx_main - main thread context structure
46  * @test_state:               Current test state
47  * @thread:                   `main` thread's thread structure
48  * @ev_req:                   Request event sent by the unittest thread to
49  *                            the main thread in order to start a new test
50  * @ev_resp:                  Response event sent by the main thread to
51  *                            the unittest thread when the test case is
52  *                            complete
53  * @runningstate_lock:        Spin_lock used to ensure the unittest thread
54  *                            waits in running state. This is used to keep
55  *                            the peer thread in ready state.
56  * @cpu_expected:             Cpu on which the main thread is pinned to and
57  *                            shall be running on when setting the peer
58  *                            thread's pinned cpu.
59  * @actual_pinned_cpu:        Peer thread's pinned cpu as seen by the main
60  *                            thread when the thread_set_pinned_cpu is complete
61  */
62 struct pincputest_thread_ctx_main {
63     void* test_state;
64     thread_t* thread;
65     event_t ev_req;
66     event_t ev_resp;
67     spin_lock_t runningstate_lock;
68     int cpu_expected;
69     int actual_pinned_cpu;
70 };
71 
72 /**
73  * struct pincputest_thread_ctx_peer - peer thread context structure
74  * @test_state:               Current test state
75  * @thread:                   `peer` thread's thread structure
76  * @ev_req:                   Request event sent by the main thread to
77  *                            the peer thread.
78  * @ev_resp:                  Response event sent by the peer thread to
79  *                            the main thread
80  * @blockingstate_mutex:      Mutex used to set the peer thread
81  *                            in blocking state
82  * @blockingstate_event:      Event used to set the peer thread
83  *                            in blocking state, as an additional option
84  *                            to the mutex.
85  * @runningstate_lock:        Spin_lock used to set the peer thread
86  *                            in running state.
87  * @cpu_expected:             Cpu on which the peer thread is pinned to.
88  * @cpu_actual:               Cpu on which the peer thread is running
89  *                            after handling the request event
90  *                            from the main thread
91  */
92 struct pincputest_thread_ctx_peer {
93     void* test_state;
94     thread_t* thread;
95     event_t ev_req;
96     event_t ev_resp;
97     mutex_t blockingstate_mutex;
98     event_t blockingstate_event;
99     spin_lock_t runningstate_lock;
100     int cpu_expected;
101     int cpu_actual;
102 };
103 
104 /**
105  * struct pincputest_t - test state structure for the pincputest
106  * @is_rt_main:               If true, main thread is real-time
107  * @is_rt_peer:               If true, peer thread is real-time
108  * @pinned_is_current:        If true, main thread sets the peer
109  *                            thread's pinned cpu to the current cpu
110  * @priority_main:            Priority of the main thread
111  * @priority_peer:            Priority of the peer thread
112  * @expected_state_peer:      Expected thread state
113  *                            (running/ready/blocked/sleeping)
114  *                            for the peer thread to be in when
115  *                            its cpu pinning is updated
116  * @blockingstate_use_mutex:  If true, the peer thread blocks on a mutex,
117  *                            If false, it blocks on an event.
118  * @ctx_main:                 main thread context
119  * @ctx_peer:                 peer thread context
120  * The pincputest consists of a `main` thread setting cpu pinning
121  * on a `peer` thread. The test covers main and peer threads
122  * being standard or realtime threads and having different relative priorities.
123  */
124 typedef struct {
125     bool is_rt_main;
126     bool is_rt_peer;
127     bool pinned_is_current;
128     int priority_main;
129     int priority_peer;
130     enum thread_state expected_state_peer;
131     bool blockingstate_use_mutex;
132     struct pincputest_thread_ctx_main ctx_main;
133     struct pincputest_thread_ctx_peer ctx_peer;
134 } pincputest_t;
135 
thread_state_to_str(enum thread_state state)136 static const char* thread_state_to_str(enum thread_state state) {
137     switch (state) {
138     case THREAD_SUSPENDED:
139         return "Suspended";
140     case THREAD_READY:
141         return "Ready";
142     case THREAD_RUNNING:
143         return "Running";
144     case THREAD_BLOCKED:
145         return "Blocked";
146     case THREAD_SLEEPING:
147         return "Sleeping";
148     case THREAD_DEATH:
149         return "Death";
150     default:
151         return "Unknown";
152     }
153 }
154 
/**
 * pincputest_param_to_string() - trace helper describing one parameter
 * combination.
 * @param: pointer to the testing_Combine() parameter array. Layout must
 *         match the unpacking in TEST_F_SETUP:
 *         [0] is_rt_main (bool*), [1] is_rt_peer (bool*),
 *         [2] pinned_is_current (bool*),
 *         [3] priority_main (int*), [4] priority_peer (int*).
 */
static void pincputest_param_to_string(const void* param) {
    const void* const* params = param;
    /*
     * Priorities live at indices [3] and [4]; index [2] is the
     * pinned_is_current flag, which must not be printed as a priority.
     */
    TRACEF("[main:%s(%d), peer:%s(%d)]\n", *(const bool*)params[0] ? "rt" : "std",
           *(const int*)params[3], *(const bool*)params[1] ? "rt" : "std",
           *(const int*)params[4]);
}
161 
/**
 * pincputest_unittest_thread() - controller test function invoked from
 * the unittest thread.
 * @_state: state of the parameterized test being run.
 *
 * Drives one full test run: for each round, arranges for the peer
 * thread to reach the expected state (running/ready/blocked/sleeping)
 * before the main thread invokes thread_set_pinned_cpu() on the peer
 * thread, then verifies the peer ends up running on the expected cpu.
 * The final round unpins the peer thread (pinned cpu -1).
 */
static void pincputest_unittest_thread(pincputest_t* _state) {
    struct pincputest_thread_ctx_main* ctx_main = &_state->ctx_main;
    struct pincputest_thread_ctx_peer* ctx_peer = &_state->ctx_peer;
    spin_lock_saved_state_t lock_state_main;
    spin_lock_saved_state_t lock_state_peer;
    ctx_main->test_state = _state;
    ctx_peer->test_state = _state;
    int cpu_peer = -1;           /* current cpu for the peer thread */
    ctx_peer->cpu_expected = -1; /* new target/pinned cpu for the peer thread */
    ctx_main->cpu_expected = -1; /* target cpu for the main thread */

    /*
     * set current thread's priority just higher than peer's:
     * this thread (the unittest thread) handles the
     * necessary locking / unlocking for the peer thread
     * to reach the desired state.
     * So the unittest thread shall be either same priority
     * as peer if non real-time (with time slicing
     * / collaborative multi-threading), or higher priority
     * in case peer is a real-time thread.
     * To make things simpler, we set its priority higher
     * than the peer thread.
     */
    thread_set_priority(_state->priority_peer + 1);

    for (int c = 0; c <= PINCPU_TEST_CPU_COUNT; c++) {
        /* reset actual_pinned_cpu:
         * [-1..PINCPU_TEST_CPU_COUNT-1] are all valid values
         * PINCPU_TEST_CPU_COUNT can thus be used as the reset value
         */
        ctx_main->actual_pinned_cpu = PINCPU_TEST_CPU_COUNT;
        cpu_peer = ctx_peer->cpu_actual == -1 ? 0 : ctx_peer->cpu_actual;
        if (thread_pinned_cpu(ctx_peer->thread) == -1) {
            /* this case can only happen at the beginning of the test */
            DEBUG_ASSERT(c == 0);
            thread_set_pinned_cpu(ctx_peer->thread, cpu_peer);
        }
        DEBUG_ASSERT(thread_pinned_cpu(ctx_peer->thread) == cpu_peer);

        /*
         * define the new target pinned cpu
         * for both peer and main threads
         */
        ctx_peer->cpu_expected = (cpu_peer + 1) % PINCPU_TEST_CPU_COUNT;
        if (_state->pinned_is_current) {
            ctx_main->cpu_expected = ctx_peer->cpu_expected;
        } else {
            ctx_main->cpu_expected = (cpu_peer + 2) % PINCPU_TEST_CPU_COUNT;
        }
        thread_set_pinned_cpu(ctx_main->thread, ctx_main->cpu_expected);
        if (c == PINCPU_TEST_CPU_COUNT) {
            /* for the last round, unpin peer thread */
            ctx_peer->cpu_expected = -1;
        }

        /*
         * `unittest` thread shall be pinned to same cpu
         * as `peer` thread when peer expected state is
         * ready, blocked or sleeping.
         * However if the expected state is running, the
         * unittest thread shall be pinned to another cpu
         * (other than current and new pinned target cpu)
         */
        if (_state->expected_state_peer == THREAD_RUNNING) {
            thread_set_pinned_cpu(get_current_thread(),
                                  (cpu_peer + 3) % PINCPU_TEST_CPU_COUNT);
        } else {
            thread_set_pinned_cpu(get_current_thread(), cpu_peer);
        }
        /*
         * In unit-test environment, ensure all cpus are idle before
         * starting the test. We actually want the cpu on which to pin the
         * peer thread to be idle when thread_set_pinned_cpu is invoked, as
         * this is the most complex scheduling state.
         */
        thread_sleep_ns(100000000); /* wait 100ms for tgt cpu to be idle */

        /* start the test by prepping
         * the locking states for the peer thread
         */
        if (_state->expected_state_peer == THREAD_BLOCKED &&
            _state->blockingstate_use_mutex) {
            mutex_acquire(&ctx_peer->blockingstate_mutex);
        }

        /*
         * notify the peer thread of a new test:
         * the peer thread will take the right
         * action to reach its expected state
         * before the main thread invokes
         * thread_set_pinned_cpu()
         */
        LTRACEF("ev_req sent to peer (pinned_cpu=%d curr_cpu=%d)\n",
                thread_pinned_cpu(get_current_thread()), arch_curr_cpu_num());
        event_signal(&ctx_peer->ev_req, true);

        /*
         * then notify the main thread
         * to invoke thread_set_pinned_cpu
         */
        LTRACEF("ev_req sent to main (pinned_cpu=%d curr_cpu=%d)\n",
                thread_pinned_cpu(get_current_thread()), arch_curr_cpu_num());
        event_signal(&ctx_main->ev_req, true);

        /* now wait for the main thread to invoke
         * the cpu pinning on the peer thread.
         * If the main thread is not supposed to be
         * preempted by the peer thread, simply
         * wait for the response event.
         * However if the main thread is preempted
         * right away (due to a higher priority peer thread),
         * the peer thread needs to be unlocked as soon
         * as it is pinned to its new cpu and the main thread
         * is in ready state.
         */
        bool main_preempted = false;
        if ((ctx_peer->cpu_expected > -1) &&
            (_state->expected_state_peer == THREAD_RUNNING)) {
            if (_state->priority_main < _state->priority_peer) {
                main_preempted = true;
            }
        }
        if (main_preempted) {
            /* busy-wait (under the peer's spin lock) until the peer
             * reports it is running on the expected cpu */
            volatile int* peer_cpu_ptr = &ctx_peer->cpu_actual;
            int peer_cpu;
            int busy_loop_cnt = 0;
            do {
                spin_lock_irqsave(&ctx_peer->runningstate_lock,
                                  lock_state_peer);
                peer_cpu = *peer_cpu_ptr;
                spin_unlock_irqrestore(&ctx_peer->runningstate_lock,
                                       lock_state_peer);
                if (peer_cpu != ctx_peer->cpu_expected &&
                    ++busy_loop_cnt % 10000 == 0) {
                    LTRACEF("%s: thread %s, actual_cpu [%d] != expected_cpu [%d], keep waiting...\n",
                            __func__, ctx_peer->thread->name, peer_cpu,
                            ctx_peer->cpu_expected);
                }
            } while (peer_cpu != ctx_peer->cpu_expected);

            LTRACEF("%s: thread %s, curr_cpu [%d] == expected_cpu [%d]!\n",
                    __func__, ctx_peer->thread->name, peer_cpu,
                    ctx_peer->cpu_expected);
        } else if (_state->expected_state_peer == THREAD_READY) {
            /*
             * for peer thread to be in READY state, the higher priority
             * unittest thread shall be busy on the same cpu as the peer thread
             * until the main thread is done with the pinned cpu request.
             * loop until the peer thread pinning completes.
             */
            volatile int* peer_pinned_cpu_ptr = &ctx_main->actual_pinned_cpu;
            int peer_cpu;
            do {
                spin_lock_irqsave(&ctx_main->runningstate_lock,
                                  lock_state_main);
                peer_cpu = *peer_pinned_cpu_ptr;
                spin_unlock_irqrestore(&ctx_main->runningstate_lock,
                                       lock_state_main);
                /* note: do not add LTRACEF statement as the thread will
                 * become BLOCKED and the expected peer thread state
                 * will not be guaranteed!!
                 */
            } while (peer_cpu != ctx_peer->cpu_expected);
            LTRACEF("ev_resp from main waiting...\n");
            event_wait(&ctx_main->ev_resp);
            LTRACEF("ev_resp from main received!\n");
        } else {
            LTRACEF("ev_resp from main waiting...\n");
            event_wait(&ctx_main->ev_resp);
            LTRACEF("ev_resp from main received!\n");
        }
        /* unblock the peer thread for it
         * to complete the test
         * (report its actual cpu)
         */
        if (_state->expected_state_peer == THREAD_BLOCKED &&
            _state->blockingstate_use_mutex) {
            mutex_release(&ctx_peer->blockingstate_mutex);
        } else if (_state->expected_state_peer == THREAD_BLOCKED &&
                   !_state->blockingstate_use_mutex) {
            event_signal(&ctx_peer->blockingstate_event, true);
        }
        event_wait(&ctx_peer->ev_resp);
        if (main_preempted) {
            event_wait(&ctx_main->ev_resp);
        }
        thread_sleep_ns(100000000); /* wait 100ms for tgt cpu to be idle */
        LTRACEF("%s[%s] / %s[%s]\n", ctx_main->thread->name,
                thread_state_to_str(ctx_main->thread->state),
                ctx_peer->thread->name,
                thread_state_to_str(ctx_peer->thread->state));
        /* both workers shall be back waiting on their request events */
        DEBUG_ASSERT(ctx_main->thread->state == THREAD_BLOCKED);
        DEBUG_ASSERT(ctx_peer->thread->state == THREAD_BLOCKED);
        if (ctx_peer->cpu_expected > -1) {
            ASSERT_EQ(ctx_peer->cpu_expected, ctx_peer->cpu_actual);
        } else {
            /* unpinned round: any valid cpu is acceptable */
            ASSERT_GT(ctx_peer->cpu_actual, -1);
        }
        LTRACEF("%s: cpu expected (%d) actual (%d)\n**********************\n",
                __func__, ctx_peer->cpu_expected, ctx_peer->cpu_actual);
    }
test_abort:
    /* restore priority and unpin the unittest thread before returning */
    thread_set_priority(HIGH_PRIORITY);
    thread_set_pinned_cpu(get_current_thread(), -1);
    ctx_main->test_state = NULL;
    ctx_peer->test_state = NULL;
}
377 
/**
 * pincputest_main_thread() - entry point of the `main` worker thread.
 * @_state: pincputest_t test state, passed as opaque pointer.
 *
 * Loops on ev_req: each request, it waits for the peer thread to reach
 * the expected state, then invokes thread_set_pinned_cpu() on the peer
 * and publishes the result in ctx->actual_pinned_cpu before signaling
 * ev_resp. A request with expected_state_peer == THREAD_DEATH makes it
 * exit (used by teardown).
 *
 * Return: 0 on normal exit.
 */
static int pincputest_main_thread(void* _state) {
    pincputest_t* state = _state;
    struct pincputest_thread_ctx_main* ctx = &state->ctx_main;
    struct pincputest_thread_ctx_peer* ctx_peer = &state->ctx_peer;
    spin_lock_saved_state_t lock_state_main;
    while (1) {
        LTRACEF("ev_req waiting in main (pinned_cpu=%d curr_cpu=%d)\n",
                thread_pinned_cpu(get_current_thread()), arch_curr_cpu_num());
        event_wait(&ctx->ev_req);
        LTRACEF("ev_req received in main (pinned_cpu=%d curr_cpu=%d)\n",
                thread_pinned_cpu(get_current_thread()), arch_curr_cpu_num());
        DEBUG_ASSERT(_state);
        if (state->expected_state_peer == THREAD_DEATH) {
            /* exiting */
            LTRACEF("main thread exiting...\n");
            event_signal(&ctx->ev_resp, true);
            return 0;
        }
        thread_set_priority(state->priority_main);
        /* poll until the peer thread reaches the expected state */
        while (_state &&
               ctx_peer->thread->state != state->expected_state_peer) {
            thread_sleep_ns(10000000);  // sleep 10ms
            if (ctx_peer->thread->state != state->expected_state_peer) {
                LTRACEF("%s: thread %s, state [%s] != expected [%s], keep waiting...\n",
                        __func__, ctx_peer->thread->name,
                        thread_state_to_str(ctx_peer->thread->state),
                        thread_state_to_str(state->expected_state_peer));
            }
        }
        DEBUG_ASSERT(_state);
        if (ctx->cpu_expected > -1) {
            /* main thread itself shall be running on its pinned cpu */
            ASSERT_EQ(thread_curr_cpu(get_current_thread()), ctx->cpu_expected);
        }
        if (ctx_peer->cpu_expected == -1) {
            /* unpin self first on the final (unpin) round */
            thread_set_pinned_cpu(get_current_thread(), -1);
        }
        thread_set_pinned_cpu(ctx_peer->thread, ctx_peer->cpu_expected);
        /* publish the observed pinned cpu under the spin lock so the
         * unittest thread's busy-wait sees a consistent value */
        spin_lock_irqsave(&ctx->runningstate_lock, lock_state_main);
        ctx->actual_pinned_cpu = thread_pinned_cpu(ctx_peer->thread);
        spin_unlock_irqrestore(&ctx->runningstate_lock, lock_state_main);
        event_signal(&ctx->ev_resp, true);
        LTRACEF("ev_resp sent...\n");
        DEBUG_ASSERT(_state);
    test_abort:;
        /* label required by the ASSERT_* macros; restore priority */
        thread_set_priority(HIGH_PRIORITY);
    }
    return 0;
}
426 
/**
 * pincputest_peer_thread() - entry point of the `peer` worker thread.
 * @_state: pincputest_t test state, passed as opaque pointer.
 *
 * Loops on ev_req: each request, it moves itself into the expected
 * thread state (busy loop for RUNNING/READY, mutex or event wait for
 * BLOCKED, thread_sleep_ns for SLEEPING), then reports the cpu it is
 * running on via ctx->cpu_actual and signals ev_resp. A request with
 * expected_state_peer == THREAD_DEATH makes it exit (used by teardown).
 *
 * Return: 0 on normal exit, -1 on an unexpected requested state.
 */
static int pincputest_peer_thread(void* _state) {
    pincputest_t* state = _state;
    struct pincputest_thread_ctx_peer* ctx = &state->ctx_peer;
    spin_lock_saved_state_t lock_state_peer;
    int pinned_cpu;
    int curr_cpu;
    bool done;
    while (1) {
        LTRACEF("ev_req waiting in peer (pinned_cpu=%d curr_cpu=%d)\n",
                thread_pinned_cpu(get_current_thread()), arch_curr_cpu_num());
        event_wait(&ctx->ev_req);
        LTRACEF("ev_req received in peer (pinned_cpu=%d curr_cpu=%d)\n",
                thread_pinned_cpu(get_current_thread()), arch_curr_cpu_num());
        DEBUG_ASSERT(_state);
        thread_set_priority(state->priority_peer);
        switch (state->expected_state_peer) {
        case THREAD_RUNNING:
        case THREAD_READY:
            /* busy loop until migrated to the expected cpu (or until
             * unpinned, on the final round where cpu_expected == -1) */
            done = false;
            do {
                spin_lock_irqsave(&ctx->runningstate_lock, lock_state_peer);
                pinned_cpu = thread_pinned_cpu(get_current_thread());
                curr_cpu = thread_curr_cpu(get_current_thread());
                spin_unlock_irqrestore(&ctx->runningstate_lock,
                                       lock_state_peer);

                if (ctx->cpu_expected > -1) {
                    if (curr_cpu == ctx->cpu_expected) {
                        done = true;
                    }
                } else {
                    if (pinned_cpu == -1) {
                        done = true;
                    }
                }
            } while (!done);
            LTRACEF("%s: thread %s, curr_cpu [%d] == expected_cpu [%d]!\n",
                    __func__, ctx->thread->name, curr_cpu, ctx->cpu_expected);
            break;
        case THREAD_BLOCKED:
            /* go to BLOCKED state */
            if (state->blockingstate_use_mutex) {
                mutex_acquire(&ctx->blockingstate_mutex);
                mutex_release(&ctx->blockingstate_mutex);
            } else {
                event_wait(&ctx->blockingstate_event);
            }
            break;
        case THREAD_SLEEPING:
            /* go to SLEEPING state for 1 sec */
            thread_sleep_ns(1000000000);
            break;
        case THREAD_DEATH:
            /* exiting */
            LTRACEF("peer thread exiting...\n");
            event_signal(&ctx->ev_resp, true);
            return 0;
        default:
            /* unexpected state request: ack and bail out */
            event_signal(&ctx->ev_resp, true);
            return -1;
        }
        /* report the cpu we ended up on; taken under the spin lock so
         * the unittest thread's busy-wait sees a consistent value */
        spin_lock_irqsave(&ctx->runningstate_lock, lock_state_peer);
        ctx->cpu_actual = thread_curr_cpu(get_current_thread());
        spin_unlock_irqrestore(&ctx->runningstate_lock, lock_state_peer);
        if (ctx->cpu_expected > -1) {
            LTRACEF("PinCpuWhenThreadState%s [%s] cpu expected (%d) actual (%d)\n",
                    thread_state_to_str(state->expected_state_peer),
                    ctx->cpu_expected == ctx->cpu_actual ? "PASSED" : "FAILED",
                    ctx->cpu_expected, ctx->cpu_actual);
        }
        event_signal(&ctx->ev_resp, true);
        thread_set_priority(HIGH_PRIORITY);
    }
    return 0;
}
503 
/**
 * pincputest_init_threads() - create and start the peer and main worker
 * threads for one parameterized run.
 * @_state: test state whose ctx_peer/ctx_main members are initialized.
 *
 * Both threads are created at HIGH_PRIORITY, optionally marked
 * real-time per the test parameters; the peer thread starts pinned to
 * cpu 0. The peer thread is resumed before the main thread.
 */
static void pincputest_init_threads(pincputest_t* _state) {
    char thread_name[24];
    struct pincputest_thread_ctx_peer* ctx_peer = &_state->ctx_peer;
    struct pincputest_thread_ctx_main* ctx_main = &_state->ctx_main;

    /* peer thread: events, locks, then thread creation */
    strlcpy(thread_name, "pincputest-peer-", sizeof(thread_name));
    strlcat(thread_name, _state->is_rt_peer ? "rt" : "std",
            sizeof(thread_name));
    event_init(&ctx_peer->ev_req, false, EVENT_FLAG_AUTOUNSIGNAL);
    event_init(&ctx_peer->ev_resp, false, EVENT_FLAG_AUTOUNSIGNAL);
    spin_lock_init(&ctx_peer->runningstate_lock);
    mutex_init(&ctx_peer->blockingstate_mutex);
    event_init(&ctx_peer->blockingstate_event, false, EVENT_FLAG_AUTOUNSIGNAL);
    ctx_peer->thread =
            thread_create(thread_name, pincputest_peer_thread, (void*)_state,
                          HIGH_PRIORITY, DEFAULT_STACK_SIZE);
    if (_state->is_rt_peer) {
        thread_set_real_time(ctx_peer->thread);
    }
    /* peer starts pinned to cpu 0 with no cpu reported yet */
    thread_set_pinned_cpu(ctx_peer->thread, 0);
    ctx_peer->cpu_actual = -1;

    /* main thread: events, lock, then thread creation */
    strlcpy(thread_name, "pincputest-main-", sizeof(thread_name));
    strlcat(thread_name, _state->is_rt_main ? "rt" : "std",
            sizeof(thread_name));
    event_init(&ctx_main->ev_req, false, EVENT_FLAG_AUTOUNSIGNAL);
    event_init(&ctx_main->ev_resp, false, EVENT_FLAG_AUTOUNSIGNAL);
    spin_lock_init(&ctx_main->runningstate_lock);
    ctx_main->thread =
            thread_create(thread_name, pincputest_main_thread, (void*)_state,
                          HIGH_PRIORITY, DEFAULT_STACK_SIZE);
    if (_state->is_rt_main) {
        thread_set_real_time(ctx_main->thread);
    }

    /* start both workers; peer first, as in the original ordering */
    thread_resume(ctx_peer->thread);
    thread_resume(ctx_main->thread);
}
543 
TEST_F_SETUP(pincputest)544 TEST_F_SETUP(pincputest) {
545     const void* const* params = GetParam();
546     // pincputest_param_to_string(params);
547     const bool* is_rt_main = params[0];
548     const bool* is_rt_peer = params[1];
549     const bool* pinned_is_current = params[2];
550     const int* priority_main = params[3];
551     const int* priority_peer = params[4];
552     _state->is_rt_main = *is_rt_main;
553     _state->is_rt_peer = *is_rt_peer;
554     _state->pinned_is_current = *pinned_is_current;
555     _state->priority_main = *priority_main;
556     _state->priority_peer = *priority_peer;
557     pincputest_init_threads(_state);
558 }
559 
TEST_F_TEARDOWN(pincputest)560 TEST_F_TEARDOWN(pincputest) {
561     int ret;
562     struct pincputest_thread_ctx_main* ctx_main = &_state->ctx_main;
563     struct pincputest_thread_ctx_peer* ctx_peer = &_state->ctx_peer;
564 
565     _state->expected_state_peer = THREAD_DEATH;
566     /* exiting main thread */
567     event_signal(&ctx_main->ev_req, true);
568     event_wait(&ctx_main->ev_resp);
569     thread_join(_state->ctx_main.thread, &ret, INFINITE_TIME);
570 
571     /* exiting peer thread */
572     event_signal(&ctx_peer->ev_req, true);
573     event_wait(&ctx_peer->ev_resp);
574     thread_join(_state->ctx_peer.thread, &ret, INFINITE_TIME);
575 
576     event_destroy(&ctx_main->ev_req);
577     event_destroy(&ctx_main->ev_resp);
578     event_destroy(&ctx_peer->ev_req);
579     event_destroy(&ctx_peer->ev_resp);
580     mutex_destroy(&ctx_peer->blockingstate_mutex);
581     event_destroy(&ctx_peer->blockingstate_event);
582 }
583 
TEST_P(pincputest,PinCpuWhenThreadStateRunning)584 TEST_P(pincputest, PinCpuWhenThreadStateRunning) {
585     LTRACEF("PinCpuWhenThreadStateRunning\n");
586     _state->expected_state_peer = THREAD_RUNNING;
587     _state->blockingstate_use_mutex = false;
588     pincputest_unittest_thread(_state);
589     ASSERT_EQ(HasFailure(), 0);
590 test_abort:;
591 }
592 
TEST_P(pincputest,PinCpuWhenThreadStateReady)593 TEST_P(pincputest, PinCpuWhenThreadStateReady) {
594     LTRACEF("PinCpuWhenThreadStateReady\n");
595     _state->expected_state_peer = THREAD_READY;
596     _state->blockingstate_use_mutex = false;
597     pincputest_unittest_thread(_state);
598     ASSERT_EQ(HasFailure(), 0);
599 test_abort:;
600 }
601 
TEST_P(pincputest,PinCpuWhenThreadStateSleeping)602 TEST_P(pincputest, PinCpuWhenThreadStateSleeping) {
603     LTRACEF("PinCpuWhenThreadStateSleeping\n");
604     _state->expected_state_peer = THREAD_SLEEPING;
605     _state->blockingstate_use_mutex = false;
606     pincputest_unittest_thread(_state);
607     ASSERT_EQ(HasFailure(), 0);
608 test_abort:;
609 }
610 
TEST_P(pincputest,PinCpuWhenThreadStateBlockingOnMutex)611 TEST_P(pincputest, PinCpuWhenThreadStateBlockingOnMutex) {
612     LTRACEF("PinCpuWhenThreadStateBlockingOnMutex\n");
613     _state->expected_state_peer = THREAD_BLOCKED;
614     _state->blockingstate_use_mutex = true;
615     pincputest_unittest_thread(_state);
616     ASSERT_EQ(HasFailure(), 0);
617 test_abort:;
618 }
619 
TEST_P(pincputest,PinCpuWhenThreadStateBlockingOnEvent)620 TEST_P(pincputest, PinCpuWhenThreadStateBlockingOnEvent) {
621     LTRACEF("PinCpuWhenThreadStateBlockingOnEvent\n");
622     _state->expected_state_peer = THREAD_BLOCKED;
623     _state->blockingstate_use_mutex = false;
624     pincputest_unittest_thread(_state);
625     ASSERT_EQ(HasFailure(), 0);
626 test_abort:;
627 }
628 
INSTANTIATE_TEST_SUITE_P(
        standard_threads,
        pincputest,
        testing_Combine(/* is_rt_main: main thread is standard */
                        testing_Values(0),
                        /* is_rt_peer: peer thread is standard */
                        testing_Values(0),
                        /* pinned_is_current: peer thread pinned to current */
                        testing_Bool(),
                        /* main thread priority */
                        testing_Values(HIGH_PRIORITY),
                        /* peer thread priority: same, higher, lower */
                        testing_Values(HIGH_PRIORITY,
                                       HIGH_PRIORITY + 2,
                                       HIGH_PRIORITY - 2)));
644 
INSTANTIATE_TEST_SUITE_P(
        current_is_realtime,
        pincputest,
        testing_Combine(/* is_rt_main: main thread is real-time */
                        testing_Values(1),
                        /* is_rt_peer: peer thread is standard */
                        testing_Values(0),
                        /* pinned_is_current: peer thread pinned to current */
                        testing_Values(1),  // testing_Bool(),
                        /* main thread priority */
                        testing_Values(HIGH_PRIORITY),
                        /* peer thread priority: same, higher, lower */
                        testing_Values(HIGH_PRIORITY,
                                       HIGH_PRIORITY + 2,
                                       HIGH_PRIORITY - 2)));
660 
661 PORT_TEST(pincputest, "com.android.kernel.pincputest");
662