1 /*
2 * Copyright (c) 2018 Google, Inc.
3 *
4 * SPDX-License-Identifier: GPL-2.0-or-later
5 *
6 * Six RT FIFO tasks are created and affined to the same CPU. They execute
7 * with a particular pattern of overlapping eligibility to run. The resulting
8 * execution pattern is checked to see that the tasks execute as expected given
9 * their priorities.
10 */
11
12 #define _GNU_SOURCE
13 #include <errno.h>
14 #include <pthread.h>
15 #include <sched.h>
16 #include <semaphore.h>
17 #include <time.h>
18
19 #include "tst_test.h"
20 #include "tst_safe_file_ops.h"
21 #include "tst_safe_pthread.h"
22
23 #include "trace_parse.h"
24 #include "util.h"
25
26 #define TRACE_EVENTS "sched_wakeup sched_switch sched_process_exit"
27
/* Kernel thread ids of the six RT threads, indexed by the RT_*_TID enum;
 * each thread fills in its own slot via gettid() when it starts. */
static int rt_task_tids[6];
29
30 /*
31 * Create two of each RT FIFO task at each priority level. Ensure that
32 * - higher priority RT tasks preempt lower priority RT tasks
33 * - newly woken RT tasks at the same priority level do not preempt currently
34 * running RT tasks
35 *
36 * Affine all tasks to CPU 0.
37 * Have rt_low_fn 1 run first. It wakes up rt_low_fn 2, which should not run
38 * until rt_low_fn 1 sleeps/exits.
39 * rt_low_fn2 wakes rt_med_fn1. rt_med_fn1 should run immediately, then sleep,
40 * allowing rt_low_fn2 to complete.
 * rt_med_fn1 wakes rt_med_fn2, which should not run until rt_med_fn1
 * sleeps/exits... (etc)
43 */
/* Handshake semaphores: sem_<prio>_<x> is posted to wake rt_<prio>_fn_<x>.
 * All are initialized to 0 in run(), so every waiter blocks until posted.
 * Note: sem_low_a is initialized in run() but never waited on or posted. */
static sem_t sem_high_b;
static sem_t sem_high_a;
static sem_t sem_med_b;
static sem_t sem_med_a;
static sem_t sem_low_b;
static sem_t sem_low_a;
50
/* Indices into rt_task_tids[]. These values also serve as placeholder
 * event_data in events[] until fixup_expected_events() replaces them with
 * the real tids. */
enum {
	RT_LOW_FN_A_TID = 0,
	RT_LOW_FN_B_TID,
	RT_MED_FN_A_TID,
	RT_MED_FN_B_TID,
	RT_HIGH_FN_A_TID,
	RT_HIGH_FN_B_TID,
};
59
/* One expected trace event: a wakeup of, or a switch to, a given task. */
struct expected_event {
	int event_type;
	/*
	 * If sched_wakeup, pid being woken.
	 * If sched_switch, pid being switched to.
	 */
	int event_data;
};
/* The exact sequence of wakeups/switches (involving our tasks only) that a
 * correct FIFO scheduler must produce. event_data holds RT_*_TID enum
 * values here; fixup_expected_events() rewrites them to real tids before
 * the trace is generated. */
static struct expected_event events[] = {
	/* rt_low_fn_a wakes up rt_low_fn_b:
	 * sched_wakeup(rt_low_fn_b) */
	{ .event_type = TRACE_RECORD_SCHED_WAKEUP,
	  .event_data = RT_LOW_FN_B_TID},
	/* TODO: Expect an event for the exit of rt_low_fn_a. */
	/* 3ms goes by, then rt_low_fn_a exits and rt_low_fn_b starts running */
	{ .event_type = TRACE_RECORD_SCHED_SWITCH,
	  .event_data = RT_LOW_FN_B_TID},
	/* rt_low_fn_b wakes rt_med_fn_a which runs immediately */
	{ .event_type = TRACE_RECORD_SCHED_WAKEUP,
	  .event_data = RT_MED_FN_A_TID},
	{ .event_type = TRACE_RECORD_SCHED_SWITCH,
	  .event_data = RT_MED_FN_A_TID},
	/* rt_med_fn_a sleeps, allowing rt_low_fn_b time to exit */
	{ .event_type = TRACE_RECORD_SCHED_SWITCH,
	  .event_data = RT_LOW_FN_B_TID},
	/* TODO: Expect an event for the exit of rt_low_fn_b. */
	{ .event_type = TRACE_RECORD_SCHED_WAKEUP,
	  .event_data = RT_MED_FN_A_TID},
	{ .event_type = TRACE_RECORD_SCHED_SWITCH,
	  .event_data = RT_MED_FN_A_TID},
	/* rt_med_fn_a wakes rt_med_fn_b */
	{ .event_type = TRACE_RECORD_SCHED_WAKEUP,
	  .event_data = RT_MED_FN_B_TID},
	/* 3ms goes by, then rt_med_fn_a exits and rt_med_fn_b starts running */
	/* TODO: Expect an event for the exit of rt_med_fn_a */
	{ .event_type = TRACE_RECORD_SCHED_SWITCH,
	  .event_data = RT_MED_FN_B_TID},
	/* rt_med_fn_b wakes up rt_high_fn_a which runs immediately */
	{ .event_type = TRACE_RECORD_SCHED_WAKEUP,
	  .event_data = RT_HIGH_FN_A_TID},
	{ .event_type = TRACE_RECORD_SCHED_SWITCH,
	  .event_data = RT_HIGH_FN_A_TID},
	/* rt_high_fn_a sleeps, allowing rt_med_fn_b time to exit */
	{ .event_type = TRACE_RECORD_SCHED_SWITCH,
	  .event_data = RT_MED_FN_B_TID},
	/* TODO: Expect an event for the exit of rt_med_fn_b */
	{ .event_type = TRACE_RECORD_SCHED_WAKEUP,
	  .event_data = RT_HIGH_FN_A_TID},
	{ .event_type = TRACE_RECORD_SCHED_SWITCH,
	  .event_data = RT_HIGH_FN_A_TID},
	/* rt_high_fn_a wakes up rt_high_fn_b */
	{ .event_type = TRACE_RECORD_SCHED_WAKEUP,
	  .event_data = RT_HIGH_FN_B_TID},
	/* 3ms goes by, then rt_high_fn_a exits and rt_high_fn_b starts running */
	/* TODO: Expect an event for the exit of rt_high_fn_a */
	{ .event_type = TRACE_RECORD_SCHED_SWITCH,
	  .event_data = RT_HIGH_FN_B_TID},
};
118
/* Highest-priority pair, thread B: records its tid, then blocks until
 * rt_high_fn_a posts it; it only gets the CPU once rt_high_fn_a exits. */
static void *rt_high_fn_b(void *arg LTP_ATTRIBUTE_UNUSED)
{
	affine(0);
	rt_task_tids[RT_HIGH_FN_B_TID] = gettid();

	/* Block until rt_high_fn_a posts us; same priority, so we run
	 * only after it exits. */
	sem_wait(&sem_high_b);

	return NULL;
}
130
/* Highest-priority pair, thread A: woken by rt_med_fn_b, briefly sleeps so
 * rt_med_fn_b can exit, then wakes rt_high_fn_b (which must not preempt us,
 * being equal priority) and burns CPU before exiting. */
static void *rt_high_fn_a(void *arg LTP_ATTRIBUTE_UNUSED)
{
	affine(0);
	rt_task_tids[RT_HIGH_FN_A_TID] = gettid();

	/* Block until rt_med_fn_b posts us. */
	sem_wait(&sem_high_a);

	/* Yield the CPU briefly so rt_med_fn_b gets a chance to exit. */
	usleep(1000);

	/* Release rt_high_fn_b; same priority, so we keep running. */
	sem_post(&sem_high_b);

	/* Hold the CPU for a while before exiting. */
	burn(3000, 0);

	return NULL;
}
150
/* Medium-priority pair, thread B: woken by rt_med_fn_a but runs only after
 * it exits; then wakes the higher-priority rt_high_fn_a, which preempts us
 * immediately. */
static void *rt_med_fn_b(void *arg LTP_ATTRIBUTE_UNUSED)
{
	affine(0);
	rt_task_tids[RT_MED_FN_B_TID] = gettid();

	/* Block until rt_med_fn_a posts us; same priority, so we run
	 * only after it exits. */
	sem_wait(&sem_med_b);

	/* Release rt_high_fn_a; being higher priority it runs at once,
	 * preempting us. */
	sem_post(&sem_high_a);

	return NULL;
}
166
/* Medium-priority pair, thread A: woken by rt_low_fn_b, sleeps so the
 * low-priority thread can exit, then wakes rt_med_fn_b (equal priority, so
 * it must wait) and burns CPU before exiting. */
static void *rt_med_fn_a(void *arg LTP_ATTRIBUTE_UNUSED)
{
	affine(0);
	rt_task_tids[RT_MED_FN_A_TID] = gettid();

	/* Block until rt_low_fn_b posts us. */
	sem_wait(&sem_med_a);

	/* Yield the CPU so rt_low_fn_b gets a chance to exit. */
	usleep(3000);

	/* Release rt_med_fn_b; same priority, so we keep running. */
	sem_post(&sem_med_b);

	/* Hold the CPU for a while before exiting. */
	burn(3000, 0);

	return NULL;
}
186
/* Low-priority pair, thread B: woken by rt_low_fn_a but runs only after it
 * exits; then wakes the higher-priority rt_med_fn_a and lingers briefly. */
static void *rt_low_fn_b(void *arg LTP_ATTRIBUTE_UNUSED)
{
	affine(0);
	rt_task_tids[RT_LOW_FN_B_TID] = gettid();

	/* Block until rt_low_fn_a posts us; same priority, so we run
	 * only after it exits. */
	sem_wait(&sem_low_b);

	/* Release rt_med_fn_a; being higher priority it runs at once,
	 * preempting us. */
	sem_post(&sem_med_a);

	/* The post above does not always cause an immediate switch to
	 * rt_med_fn_a; keep running a bit before exiting so the expected
	 * switch-back-to-us event is observable. */
	burn(1000, 0);

	return NULL;
}
207
208 /* Put real task tids into the expected events. */
fixup_expected_events(void)209 static void fixup_expected_events(void)
210 {
211 int i;
212 int size = sizeof(events)/sizeof(struct expected_event);
213
214 for (i = 0; i < size; i++)
215 events[i].event_data = rt_task_tids[events[i].event_data];
216 }
217
/* Low-priority pair, thread A: the first task to run. Waits for all
 * siblings to record their tids and block, patches the expected-event
 * table, drops the start marker into the trace, then wakes rt_low_fn_b
 * (equal priority, so it must wait for us) and burns CPU before exiting. */
static void *rt_low_fn_a(void *arg LTP_ATTRIBUTE_UNUSED)
{
	affine(0);
	rt_task_tids[RT_LOW_FN_A_TID] = gettid();

	/* Let every other task record its tid and block on its semaphore. */
	usleep(3000);

	/* All tids are known now; substitute them into events[]. */
	fixup_expected_events();

	SAFE_FILE_PRINTF(TRACING_DIR "trace_marker", "TEST START");

	/* Release rt_low_fn_b; same priority, so we keep running. */
	sem_post(&sem_low_b);

	/* Hold the CPU for a while before exiting. */
	burn(3000, 0);

	return NULL;
}
238
239 /* Returns whether the given tid is a tid of one of the RT tasks in this
240 * testcase. */
rt_tid(int tid)241 static int rt_tid(int tid)
242 {
243 int i;
244 for (i = 0; i < 6; i++)
245 if (rt_task_tids[i] == tid)
246 return 1;
247 return 0;
248 }
249
/*
 * Scan the loaded trace records, beginning at the "TEST START" marker, and
 * verify that every sched_wakeup/sched_switch event involving our six RT
 * tasks matches events[] in order. Events for unrelated tasks are skipped.
 * Returns 0 when all expected events were matched, -1 on the first mismatch
 * or if the trace ends early.
 */
static int parse_results(void)
{
	int i;
	int test_start = 0;	/* becomes 1 once the marker is seen */
	int event_idx = 0;	/* next entry of events[] we expect */
	int events_size = sizeof(events)/sizeof(struct expected_event);

	for (i = 0; i < num_trace_records; i++) {
		if (trace[i].event_type == TRACE_RECORD_TRACING_MARK_WRITE &&
		    !strcmp(trace[i].event_data, "TEST START"))
			test_start = 1;

		/* Ignore everything recorded before the start marker. */
		if (!test_start)
			continue;

		if (trace[i].event_type != TRACE_RECORD_SCHED_WAKEUP &&
		    trace[i].event_type != TRACE_RECORD_SCHED_SWITCH)
			continue;

		if (trace[i].event_type == TRACE_RECORD_SCHED_SWITCH) {
			struct trace_sched_switch *t = trace[i].event_data;
			/* Only switches *to* one of our tasks matter. */
			if (!rt_tid(t->next_pid))
				continue;
			if (events[event_idx].event_type !=
			    TRACE_RECORD_SCHED_SWITCH ||
			    events[event_idx].event_data !=
			    t->next_pid) {
				printf("Test case failed, expecting event "
				       "index %d type %d for tid %d, "
				       "got sched switch to tid %d\n",
				       event_idx,
				       events[event_idx].event_type,
				       events[event_idx].event_data,
				       t->next_pid);
				return -1;
			}
			event_idx++;
		}

		if (trace[i].event_type == TRACE_RECORD_SCHED_WAKEUP) {
			struct trace_sched_wakeup *t = trace[i].event_data;
			/* Only wakeups of one of our tasks matter. */
			if (!rt_tid(t->pid))
				continue;
			if (events[event_idx].event_type !=
			    TRACE_RECORD_SCHED_WAKEUP ||
			    events[event_idx].event_data !=
			    t->pid) {
				printf("Test case failed, expecting event "
				       "index %d type %d for tid %d, "
				       "got sched wakeup to tid %d\n",
				       event_idx,
				       events[event_idx].event_type,
				       events[event_idx].event_data,
				       t->pid);
				return -1;
			}
			event_idx++;
		}

		/* All expected events matched; stop before overrunning
		 * events[]. */
		if (event_idx == events_size)
			break;
	}

	if (event_idx != events_size) {
		printf("Test case failed, "
		       "did not complete all expected events.\n");
		printf("Next expected event: event type %d for tid %d\n",
		       events[event_idx].event_type,
		       events[event_idx].event_data);
		return -1;
	}

	return 0;
}
324
/*
 * Spawn a SCHED_FIFO thread at the given priority running fn.
 *
 * prio:      SCHED_FIFO priority for the new thread.
 * fn:        thread start routine (previously declared void *, which relied
 *            on a non-standard object-pointer/function-pointer conversion).
 * rt_thread: out parameter receiving the pthread handle.
 *
 * Aborts via ERROR_CHECK/SAFE_PTHREAD_CREATE on any failure.
 */
static void create_rt_thread(int prio, void *(*fn)(void *),
			     pthread_t *rt_thread)
{
	pthread_attr_t attr;
	struct sched_param sched_params = { .sched_priority = prio };

	ERROR_CHECK(pthread_attr_init(&attr));
	/* Without PTHREAD_EXPLICIT_SCHED the new thread would inherit the
	 * creator's scheduling policy and ignore the attributes below. */
	ERROR_CHECK(pthread_attr_setinheritsched(&attr,
						 PTHREAD_EXPLICIT_SCHED));
	ERROR_CHECK(pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
	ERROR_CHECK(pthread_attr_setschedparam(&attr, &sched_params));

	SAFE_PTHREAD_CREATE(rt_thread, &attr, fn, NULL);

	/* Release resources held by the attribute object; this does not
	 * affect the already-created thread. Previously leaked. */
	ERROR_CHECK(pthread_attr_destroy(&attr));
}
341
/*
 * Test entry point: initialize the handshake semaphores, configure and
 * enable ftrace, start the six RT threads, wait for them to finish, stop
 * tracing, and check the recorded event sequence.
 */
static void run(void)
{
	pthread_t rt_low_a, rt_low_b;
	pthread_t rt_med_a, rt_med_b;
	pthread_t rt_high_a, rt_high_b;

	/* All semaphores start at 0 so each thread blocks until posted.
	 * NOTE(review): sem_init return values are not checked, and
	 * sem_low_a is initialized but never used by any thread. */
	sem_init(&sem_high_b, 0, 0);
	sem_init(&sem_high_a, 0, 0);
	sem_init(&sem_med_b, 0, 0);
	sem_init(&sem_med_a, 0, 0);
	sem_init(&sem_low_b, 0, 0);
	sem_init(&sem_low_a, 0, 0);

	/* configure and enable tracing */
	SAFE_FILE_PRINTF(TRACING_DIR "tracing_on", "0");
	SAFE_FILE_PRINTF(TRACING_DIR "buffer_size_kb", "16384");
	SAFE_FILE_PRINTF(TRACING_DIR "set_event", TRACE_EVENTS);
	/* Writing to "trace" clears the ring buffer before we start. */
	SAFE_FILE_PRINTF(TRACING_DIR "trace", "\n");
	SAFE_FILE_PRINTF(TRACING_DIR "tracing_on", "1");

	/* FIFO priorities: "low" pair at 70, "med" at 75, "high" at 80. */
	create_rt_thread(70, rt_low_fn_a, &rt_low_a);
	create_rt_thread(70, rt_low_fn_b, &rt_low_b);
	create_rt_thread(75, rt_med_fn_a, &rt_med_a);
	create_rt_thread(75, rt_med_fn_b, &rt_med_b);
	create_rt_thread(80, rt_high_fn_a, &rt_high_a);
	create_rt_thread(80, rt_high_fn_b, &rt_high_b);

	SAFE_PTHREAD_JOIN(rt_low_a, NULL);
	SAFE_PTHREAD_JOIN(rt_low_b, NULL);
	SAFE_PTHREAD_JOIN(rt_med_a, NULL);
	SAFE_PTHREAD_JOIN(rt_med_b, NULL);
	SAFE_PTHREAD_JOIN(rt_high_a, NULL);
	SAFE_PTHREAD_JOIN(rt_high_b, NULL);

	/* disable tracing */
	SAFE_FILE_PRINTF(TRACING_DIR "tracing_on", "0");
	LOAD_TRACE();

	/* NOTE(review): tst_res appends its own newline; the trailing \n in
	 * these messages produces a blank line in the output. */
	if (parse_results())
		tst_res(TFAIL, "RT FIFO tasks did not execute in the expected "
			"pattern.\n");
	else
		tst_res(TPASS, "RT FIFO tasks executed in the expected "
			"pattern.\n");
}
387
/* LTP test definition: run the scenario once per invocation; trace_cleanup
 * presumably restores the tracing state — it is defined in the trace
 * helpers, not visible here. */
static struct tst_test test = {
	.test_all = run,
	.cleanup = trace_cleanup,
};
392