1 /*
2 * Copyright (c) 2018 Google, Inc.
3 *
4 * SPDX-License-Identifier: GPL-2.0-or-later
5 *
6 * A task alternates between being big and small. Max up and down migration
7 * latencies and task placement are verified.
8 */
9
10 #define _GNU_SOURCE
11 #include <errno.h>
12 #include <pthread.h>
13 #include <sched.h>
14 #include <time.h>
15
16 #include "tst_test.h"
17 #include "tst_safe_file_ops.h"
18 #include "tst_safe_pthread.h"
19
20 #include "trace_parse.h"
21 #include "util.h"
22
23 #define TRACE_EVENTS "sched_switch"
24
25 static int task_tid;
26
27 #define MAX_UPMIGRATE_LATENCY_US 100000
28 #define MAX_DOWNMIGRATE_LATENCY_US 100000
29 #define MAX_INCORRECT_CLUSTER_PCT 10
30 #define BURN_SEC 1
31 #define NUM_LOOPS 10
/*
 * Worker thread: alternate NUM_LOOPS times between a "small" (throttled)
 * and a "big" (full-load) CPU burn phase, writing a trace marker before
 * each phase so parse_results() can correlate phases with sched events.
 */
static void *task_fn(void *arg LTP_ATTRIBUTE_UNUSED)
{
	int i;

	task_tid = gettid();

	for (i = 0; i < NUM_LOOPS; i++) {
		/* Small phase: throttled burn. */
		SAFE_FILE_PRINTF(TRACING_DIR "trace_marker", "SMALL TASK");
		burn(BURN_SEC * USEC_PER_SEC, 1);

		/* Big phase: unthrottled, full-load burn. */
		SAFE_FILE_PRINTF(TRACING_DIR "trace_marker", "CPU HOG");
		burn(BURN_SEC * USEC_PER_SEC, 0);
	}

	return NULL;
}
48
parse_results(void)49 static int parse_results(void)
50 {
51 int i, pct, rv = 0;
52 unsigned long long exec_start_us = 0;
53 unsigned long long too_big_cpu_us = 0;
54 unsigned long long too_small_cpu_us = 0;
55 unsigned long long small_task_us = 0;
56 unsigned long long big_task_us = 0;
57 unsigned long long smalltask_ts_usec = 0;
58 unsigned long long cpuhog_ts_usec = 0;
59 unsigned long long upmigrate_ts_usec = 0;
60 unsigned long long downmigrate_ts_usec = 0;
61 unsigned long long max_upmigrate_latency_usec = 0;
62 unsigned long long max_downmigrate_latency_usec = 0;
63 cpu_set_t cpuset;
64
65 if (find_cpus_with_capacity(0, &cpuset)) {
66 printf("Failed to find the CPUs in the little cluster.\n");
67 return -1;
68 }
69
70 for (i = 0; i < num_trace_records; i++) {
71 unsigned long long segment_us;
72 struct trace_sched_switch *t = trace[i].event_data;
73
74 if (trace[i].event_type == TRACE_RECORD_TRACING_MARK_WRITE) {
75 if (!strcmp(trace[i].event_data, "CPU HOG")) {
76 /* Task is transitioning to cpu hog. */
77 cpuhog_ts_usec = TS_TO_USEC(trace[i].ts);
78 if (downmigrate_ts_usec) {
79 unsigned long long temp_latency;
80 temp_latency = downmigrate_ts_usec -
81 smalltask_ts_usec;
82 if (temp_latency >
83 max_downmigrate_latency_usec)
84 max_downmigrate_latency_usec =
85 temp_latency;
86 } else if (smalltask_ts_usec) {
87 printf("Warning: small task never "
88 "downmigrated.\n");
89 rv = 1;
90 }
91 downmigrate_ts_usec = 0;
92 smalltask_ts_usec = 0;
93 } else if (!strcmp(trace[i].event_data, "SMALL TASK")) {
94 smalltask_ts_usec = TS_TO_USEC(trace[i].ts);
95 if (upmigrate_ts_usec) {
96 unsigned long long temp_latency;
97 temp_latency = upmigrate_ts_usec -
98 cpuhog_ts_usec;
99 if (temp_latency >
100 max_upmigrate_latency_usec)
101 max_upmigrate_latency_usec =
102 temp_latency;
103 } else if (cpuhog_ts_usec) {
104 printf("Warning: big task never "
105 "upmigrated.\n");
106 rv = 1;
107 }
108 upmigrate_ts_usec = 0;
109 cpuhog_ts_usec = 0;
110 }
111 continue;
112 }
113
114 if (trace[i].event_type != TRACE_RECORD_SCHED_SWITCH)
115 continue;
116 if (t->next_pid == task_tid) {
117 /* Start of task execution segment. */
118 if (exec_start_us) {
119 printf("Trace parse fail: double exec start\n");
120 return -1;
121 }
122 exec_start_us = TS_TO_USEC(trace[i].ts);
123 if (cpuhog_ts_usec && !upmigrate_ts_usec &&
124 !CPU_ISSET(trace[i].cpu, &cpuset))
125 upmigrate_ts_usec = exec_start_us;
126 if (smalltask_ts_usec && !downmigrate_ts_usec &&
127 CPU_ISSET(trace[i].cpu, &cpuset))
128 downmigrate_ts_usec = exec_start_us;
129 continue;
130 }
131 if (t->prev_pid != task_tid)
132 continue;
133 /* End of task execution segment. */
134 segment_us = TS_TO_USEC(trace[i].ts);
135 segment_us -= exec_start_us;
136 exec_start_us = 0;
137 if (CPU_ISSET(trace[i].cpu, &cpuset)) {
138 /* Task is running on little CPUs. */
139 if (cpuhog_ts_usec) {
140 /*
141 * Upmigration is accounted separately, so only
142 * record mis-scheduled time here if it happened
143 * after upmigration.
144 */
145 if (upmigrate_ts_usec)
146 too_small_cpu_us += segment_us;
147 }
148 } else {
149 /* Task is running on big CPUs. */
150 if (smalltask_ts_usec) {
151 /*
152 * Downmigration is accounted separately, so
153 * only record mis-scheduled time here if it
154 * happened after downmigration.
155 */
156 if (downmigrate_ts_usec)
157 too_big_cpu_us += segment_us;
158 }
159 }
160 if (cpuhog_ts_usec)
161 big_task_us += segment_us;
162 if (smalltask_ts_usec)
163 small_task_us += segment_us;
164 }
165
166 pct = (too_big_cpu_us * 100) / small_task_us;
167 rv |= (pct > MAX_INCORRECT_CLUSTER_PCT);
168 printf("Time incorrectly scheduled on big when task was small, "
169 "after downmigration: "
170 "%lld usec (%d%% of small task CPU time)\n", too_big_cpu_us,
171 pct);
172 pct = (too_small_cpu_us * 100) / big_task_us;
173 rv |= (pct > MAX_INCORRECT_CLUSTER_PCT);
174 printf("Time incorrectly scheduled on small when task was big, "
175 "after upmigration: "
176 "%lld usec (%d%% of big task CPU time)\n", too_small_cpu_us,
177 pct);
178
179 printf("small task time: %lld\nbig task time: %lld\n",
180 small_task_us, big_task_us);
181
182 printf("Maximum upmigration time: %lld\n",
183 max_upmigrate_latency_usec);
184 printf("Maximum downmigration time: %lld\n",
185 max_downmigrate_latency_usec);
186
187 return (rv ||
188 max_upmigrate_latency_usec > MAX_UPMIGRATE_LATENCY_US ||
189 max_downmigrate_latency_usec > MAX_DOWNMIGRATE_LATENCY_US);
190 }
191
run(void)192 static void run(void)
193 {
194 pthread_t task_thread;
195
196 tst_res(TINFO, "Maximum incorrect cluster time percentage: %d%%",
197 MAX_INCORRECT_CLUSTER_PCT);
198 tst_res(TINFO, "Maximum downmigration latency: %d usec",
199 MAX_DOWNMIGRATE_LATENCY_US);
200 tst_res(TINFO, "Maximum upmigration latency: %d usec",
201 MAX_UPMIGRATE_LATENCY_US);
202
203 printf("Task alternating between big and small for %d sec\n",
204 BURN_SEC * NUM_LOOPS * 2);
205
206 /* configure and enable tracing */
207 SAFE_FILE_PRINTF(TRACING_DIR "tracing_on", "0");
208 SAFE_FILE_PRINTF(TRACING_DIR "buffer_size_kb", "16384");
209 SAFE_FILE_PRINTF(TRACING_DIR "set_event", TRACE_EVENTS);
210 SAFE_FILE_PRINTF(TRACING_DIR "trace", "\n");
211 SAFE_FILE_PRINTF(TRACING_DIR "tracing_on", "1");
212
213 SAFE_PTHREAD_CREATE(&task_thread, NULL, task_fn, NULL);
214 SAFE_PTHREAD_JOIN(task_thread, NULL);
215
216 /* disable tracing */
217 SAFE_FILE_PRINTF(TRACING_DIR "tracing_on", "0");
218 LOAD_TRACE();
219
220 if (parse_results())
221 tst_res(TFAIL, "Task placement and migration latency goals "
222 "were not met.\n");
223 else
224 tst_res(TPASS, "Task placement and migration latency goals "
225 "were met.\n");
226 }
227
/*
 * LTP test registration: run() executes the whole scenario once;
 * trace_cleanup (from the trace_parse helpers) runs on exit —
 * presumably restoring ftrace state; confirm in trace_parse.c.
 */
static struct tst_test test = {
	.test_all = run,
	.cleanup = trace_cleanup,
};
232