/*
 * Copyright (c) 2018 Google, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * A CFS task is affined to a particular CPU. The task runs as a CPU hog for a
 * while, then as a very small task for a while. The latency for the CPU's
 * frequency to reach max and then min is verified.
 */

#define _GNU_SOURCE
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "tst_test.h"
#include "tst_safe_file_ops.h"
#include "tst_safe_pthread.h"

#include "trace_parse.h"
#include "util.h"

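/*
 * ftrace events to capture; cpu_frequency provides the frequency
 * transitions that parse_results() checks below.
 */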
#define TRACE_EVENTS "sched_process_exit sched_process_fork cpu_frequency"

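/*
 * Maximum acceptable latency (usec) for the test CPU to ramp up to
 * scaling_max_freq after the hog starts, and back down to
 * scaling_min_freq after it becomes a small task.
 */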
#define MAX_FREQ_INCREASE_LATENCY_US 70000
#define MAX_FREQ_DECREASE_LATENCY_US 70000

static int test_cpu;

#define BURN_MSEC 500
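/*
 * CPU hog thread: pins itself to test_cpu, waits for the CPU to settle at
 * scaling_min_freq, then runs as a hog and then as a very small task,
 * writing a trace marker at the start of each phase.
 */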
static void *burn_fn(void *arg LTP_ATTRIBUTE_UNUSED)
{
	int i = 0;
	unsigned int scaling_min_freq, scaling_cur_freq;
	char scaling_freq_file[60];

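	/*
	 * affine() comes from the local util helpers and is assumed to pin
	 * the calling thread to test_cpu.
	 */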
	affine(test_cpu);

	/*
	 * wait a bit to allow any hacks to boost frequency on migration
	 * to take effect
	 */
	usleep(200);

	sprintf(scaling_freq_file,
		"/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq",
		test_cpu);
	SAFE_FILE_SCANF(scaling_freq_file, "%u", &scaling_min_freq);

	sprintf(scaling_freq_file,
		"/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq",
		test_cpu);

	/* wait for test_cpu to reach scaling_min_freq */
	while (i++ < 10) {
		usleep(100 * 1000);
		SAFE_FILE_SCANF(scaling_freq_file, "%u",
				&scaling_cur_freq);
		if (scaling_cur_freq == scaling_min_freq)
			break;
	}
	if (i >= 10) {
		printf("Unable to reach scaling_min_freq before test!\n");
		return NULL;
	}

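	/*
	 * Hog phase followed by a small-task phase, each BURN_MSEC long,
	 * with a trace_marker write before each so parse_results() can
	 * locate the phase boundaries; burn()'s second argument is assumed
	 * to select the small-task (mostly idle) behaviour.
	 */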
	tracefs_write("trace_marker", "affined");
	burn(BURN_MSEC * 1000, 0);
	tracefs_write("trace_marker", "small task");
	burn(BURN_MSEC * 1000, 1);

	return NULL;
}

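/*
 * Walk the captured trace: find the "affined" and "small task" markers,
 * the highest frequency reported between them and the lowest frequency
 * reported after the sleep marker, check those against scaling_max_freq
 * and scaling_min_freq, and then check how quickly each was reached.
 * Returns 0 on success, nonzero on failure.
 */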
static int parse_results(void)
{
	int i;

	int start_idx;
	int sleep_idx;
	unsigned int max_freq_seen = 0;
	int max_freq_seen_idx;
	unsigned int min_freq_seen = UINT_MAX;
	int min_freq_seen_idx;

	char scaling_freq_file[60];
	unsigned int scaling_max_freq;
	unsigned int scaling_min_freq;

	unsigned int increase_latency_usec;
	unsigned int decrease_latency_usec;

	/* find starting timestamp of test */
	for (i = 0; i < num_trace_records; i++)
		if (trace[i].event_type == TRACE_RECORD_TRACING_MARK_WRITE &&
		    !strcmp(trace[i].event_data, "affined"))
			break;
	if (i == num_trace_records) {
		printf("Did not find start of burn thread in trace!\n");
		return -1;
	}
	start_idx = i;

	/* find timestamp when burn thread sleeps */
	for (; i < num_trace_records; i++)
		if (trace[i].event_type == TRACE_RECORD_TRACING_MARK_WRITE &&
		    !strcmp(trace[i].event_data, "small task"))
			break;
	if (i == num_trace_records) {
		printf("Did not find switch to small task of burn thread in "
		       "trace!\n");
		return -1;
	}
	sleep_idx = i;

	/* find highest CPU frequency between start and sleep timestamp */
	for (i = start_idx; i < sleep_idx; i++)
		if (trace[i].event_type == TRACE_RECORD_CPU_FREQUENCY) {
			struct trace_cpu_frequency *t = trace[i].event_data;
			if (t->cpu == test_cpu && t->state > max_freq_seen) {
				max_freq_seen = t->state;
				max_freq_seen_idx = i;
			}
		}
	if (max_freq_seen == 0) {
		printf("No freq events between start and sleep!\n");
		return -1;
	}

	/* find lowest CPU frequency between sleep timestamp and end */
	for (; i < num_trace_records; i++)
		if (trace[i].event_type == TRACE_RECORD_CPU_FREQUENCY) {
			struct trace_cpu_frequency *t = trace[i].event_data;
			if (t->cpu == test_cpu && t->state < min_freq_seen) {
				min_freq_seen = t->state;
				min_freq_seen_idx = i;
			}
		}
	if (min_freq_seen == UINT_MAX) {
		printf("No freq events between sleep and end!\n");
		return -1;
	}

	/* is the highest CPU freq seen equal to or greater than
	 * scaling_max_freq? */
	sprintf(scaling_freq_file,
		"/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq",
		test_cpu);
	SAFE_FILE_SCANF(scaling_freq_file, "%u", &scaling_max_freq);
	if (max_freq_seen < scaling_max_freq) {
		printf("CPU%d did not reach scaling_max_freq!\n",
		       test_cpu);
		return -1;
	} else {
		printf("CPU%d reached %u MHz during test "
		       "(scaling_max_freq %u MHz).\n", test_cpu,
		       max_freq_seen / 1000, scaling_max_freq / 1000);
	}

	/* is the lowest CPU freq equal to or less than scaling_min_freq? */
	sprintf(scaling_freq_file,
		"/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq",
		test_cpu);
	SAFE_FILE_SCANF(scaling_freq_file, "%u", &scaling_min_freq);
	if (min_freq_seen > scaling_min_freq) {
		printf("CPU%d did not reach scaling_min_freq!\n",
		       test_cpu);
		return -1;
	} else {
		printf("CPU%d reached %u MHz after test "
		       "(scaling_min_freq %u MHz).\n",
		       test_cpu, min_freq_seen / 1000,
		       scaling_min_freq / 1000);
	}

	/* calculate and check latencies */
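	/*
	 * Ramp-up latency: from the "affined" marker to the event that
	 * reported the highest frequency. Ramp-down latency: from the
	 * "small task" marker to the event that reported the lowest
	 * frequency.
	 */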
	increase_latency_usec = trace[max_freq_seen_idx].ts.sec * USEC_PER_SEC +
		trace[max_freq_seen_idx].ts.usec;
	increase_latency_usec -= trace[start_idx].ts.sec * USEC_PER_SEC +
		trace[start_idx].ts.usec;

	decrease_latency_usec = trace[min_freq_seen_idx].ts.sec * USEC_PER_SEC +
		trace[min_freq_seen_idx].ts.usec;
	decrease_latency_usec -= trace[sleep_idx].ts.sec * USEC_PER_SEC +
		trace[sleep_idx].ts.usec;

	printf("Increase latency: %u usec\n", increase_latency_usec);
	printf("Decrease latency: %u usec\n", decrease_latency_usec);

	return (increase_latency_usec > MAX_FREQ_INCREASE_LATENCY_US ||
		decrease_latency_usec > MAX_FREQ_DECREASE_LATENCY_US);
}

static void run(void)
{
	pthread_t burn_thread;

	tst_res(TINFO, "Max acceptable latency to fmax: %d usec",
		MAX_FREQ_INCREASE_LATENCY_US);
	tst_res(TINFO, "Max acceptable latency to fmin: %d usec",
		MAX_FREQ_DECREASE_LATENCY_US);

	test_cpu = tst_ncpus() - 1;
	printf("CPU hog will be bound to CPU %d.\n", test_cpu);

	/*
	 * Configure and enable tracing: stop any tracing, size the ring
	 * buffer to 16 MB, select only the events in TRACE_EVENTS, clear
	 * the trace buffer, then start tracing.
	 */
	tracefs_write("tracing_on", "0");
	tracefs_write("buffer_size_kb", "16384");
	tracefs_write("set_event", TRACE_EVENTS);
	tracefs_write("trace", "\n");
	tracefs_write("tracing_on", "1");

	SAFE_PTHREAD_CREATE(&burn_thread, NULL, burn_fn, NULL);
	SAFE_PTHREAD_JOIN(burn_thread, NULL);

	/* disable tracing */
	tracefs_write("tracing_on", "0");
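	/*
	 * LOAD_TRACE() (from trace_parse.h) is expected to read the captured
	 * trace into the trace[] records used by parse_results().
	 */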
	LOAD_TRACE();

	if (parse_results())
		tst_res(TFAIL, "Governor did not meet latency targets.");
	else
		tst_res(TPASS, "Governor met latency targets.");
}

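/*
 * trace_setup() and trace_cleanup() come from trace_parse.h and are assumed
 * to prepare and restore the tracefs state around the test.
 */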
static struct tst_test test = {
	.test_all = run,
	.setup = trace_setup,
	.cleanup = trace_cleanup,
};