// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017 Richard Palethorpe <rpalethorpe@suse.com>
 */
/*
 * Perform a small read on every file in a directory tree.
 *
 * Useful for testing file systems like proc, sysfs and debugfs or anything
 * which exposes a file-like API, so long as it respects O_NONBLOCK. This test
 * is not concerned with whether a particular file in one of these file
 * systems conforms exactly to its documented behavior, only with whether
 * reading from that file causes a serious error such as a NULL pointer
 * dereference.
 *
 * It is not required to run this as root, but test coverage will be much
 * higher with full privileges.
 *
 * The reads are performed by worker processes which are given file paths by a
 * single parent process. The parent process recursively scans a given
 * directory and passes the file paths it finds to the child processes using a
 * queue structure stored in shared memory.
 *
 * This allows the file system and individual files to be accessed in
 * parallel. Passing the 'reads' parameter (-r) will encourage this. The
 * number of worker processes is based on the number of available
 * processors. However, it is limited by default to 15 to avoid this becoming
 * an IPC stress test on systems with large numbers of weak cores. This can be
 * overridden with the '-w' parameter.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <lapi/fnmatch.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <dirent.h>
#include <errno.h>
#include <unistd.h>
#include <limits.h>
#include <fnmatch.h>
#include <semaphore.h>
#include <ctype.h>
#include <pwd.h>
#include <grp.h>

#include "tst_test.h"

#define QUEUE_SIZE 16384
#define BUFFER_SIZE 1024
#define MAX_PATH 4096
#define MAX_DISPLAY 40

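/*
 * A single-producer, single-consumer ring buffer held in shared memory.
 * Paths are stored back to back as NUL-terminated strings and the semaphore
 * counts how many strings are ready to be popped.
 */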
struct queue {
	sem_t sem;
	int front;
	int back;
	char data[QUEUE_SIZE];
};

struct worker {
	pid_t pid;
	struct queue *q;
};

enum dent_action {
	DA_UNKNOWN,
	DA_IGNORE,
	DA_READ,
	DA_VISIT,
};

static char *verbose;
static char *quiet;
static char *root_dir;
static char *str_reads;
static int reads = 1;
static char *str_worker_count;
static long worker_count;
static char *str_max_workers;
static long max_workers = 15;
static struct worker *workers;
static char *drop_privs;

static char *blacklist[] = {
	NULL, /* reserved for -e parameter */
	"/sys/power/wakeup_count",
	"/sys/kernel/debug/*",
	"/sys/devices/platform/*/eeprom",
	"/sys/devices/platform/*/nvmem",
};

static struct tst_option options[] = {
	{"v", &verbose,
	 "-v Print information about successful reads."},
	{"q", &quiet,
	 "-q Don't print file read or open errors."},
	{"d:", &root_dir,
	 "-d path Path to the directory to read from, defaults to /sys."},
	{"e:", &blacklist[0],
	 "-e pattern Ignore files which match an 'extended' pattern, see fnmatch(3)."},
	{"r:", &str_reads,
	 "-r count The number of times to schedule a file for reading."},
	{"w:", &str_max_workers,
	 "-w count Set the worker count limit, the default is 15."},
	{"W:", &str_worker_count,
	 "-W count Override the worker count. Ignores (-w) and the processor count."},
	{"p", &drop_privs,
	 "-p Drop privileges; switch to the nobody user."},
	{NULL, NULL, NULL}
};

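/*
 * Pop one path from the queue into buf. Blocks on the semaphore until a
 * string has been pushed. An empty string is the stop signal and results in
 * a return value of 0; otherwise 1 is returned.
 */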
static int queue_pop(struct queue *q, char *buf)
{
	int i = q->front, j = 0;

	sem_wait(&q->sem);

	if (!q->data[i])
		return 0;

	while (q->data[i]) {
		buf[j] = q->data[i];

		if (++j >= BUFFER_SIZE - 1)
			tst_brk(TBROK, "Buffer is too small for path");

		i = (i + 1) % QUEUE_SIZE;
	}

	buf[j] = '\0';
	tst_atomic_store((i + 1) % QUEUE_SIZE, &q->front);

	return 1;
}

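/*
 * Push a NUL-terminated string onto the queue. Returns 0, without posting
 * the semaphore, if the write would wrap around into unread data; returns 1
 * on success.
 */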
static int queue_push(struct queue *q, const char *buf)
{
	int i = q->back, j = 0;
	int front = tst_atomic_load(&q->front);

	do {
		q->data[i] = buf[j];

		i = (i + 1) % QUEUE_SIZE;

		if (i == front)
			return 0;

	} while (buf[j++]);

	q->back = i;
	sem_post(&q->sem);

	return 1;
}

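/*
 * Allocate a queue in shared anonymous memory so that it stays visible to a
 * forked worker, and initialise its process-shared semaphore.
 */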
static struct queue *queue_init(void)
{
	struct queue *q = SAFE_MMAP(NULL, sizeof(*q),
				    PROT_READ | PROT_WRITE,
				    MAP_SHARED | MAP_ANONYMOUS,
				    0, 0);

	sem_init(&q->sem, 1, 0);
	q->front = 0;
	q->back = 0;

	return q;
}

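/* Unmap a queue; workers also destroy the semaphore they were waiting on. */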
static void queue_destroy(struct queue *q, int is_worker)
{
	if (is_worker)
		sem_destroy(&q->sem);

	SAFE_MUNMAP(q, sizeof(*q));
}

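/*
 * Replace unprintable characters with spaces and truncate the buffer to
 * MAX_DISPLAY characters so that it can be shown in the test log.
 */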
static void sanitize_str(char *buf, ssize_t count)
{
	int i;

	for (i = 0; i < MIN(count, MAX_DISPLAY); i++)
		if (!isprint(buf[i]))
			buf[i] = ' ';

	if (count <= MAX_DISPLAY)
		buf[count] = '\0';
	else
		strcpy(buf + MAX_DISPLAY, "...");
}

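/*
 * Return 1 if the path matches one of the blacklist patterns and should not
 * be read.
 */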
static int is_blacklisted(const char *path)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(blacklist); i++) {
		if (blacklist[i] && !fnmatch(blacklist[i], path, FNM_EXTMATCH)) {
			if (verbose)
				tst_res(TINFO, "Ignoring %s", path);
			return 1;
		}
	}

	return 0;
}

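/*
 * Open a file with O_NONBLOCK and perform a single small read. Open and read
 * failures are only reported, they never fail the test; the test is looking
 * for more serious errors such as a NULL pointer dereference in the kernel.
 */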
static void read_test(const char *path)
{
	char buf[BUFFER_SIZE];
	int fd;
	ssize_t count;

	if (is_blacklisted(path))
		return;

	if (verbose)
		tst_res(TINFO, "%s(%s)", __func__, path);

	fd = open(path, O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		if (!quiet)
			tst_res(TINFO | TERRNO, "open(%s)", path);
		return;
	}

	count = read(fd, buf, sizeof(buf) - 1);
	if (count > 0 && verbose) {
		sanitize_str(buf, count);
		tst_res(TINFO, "read(%s, buf) = %zi, buf = %s",
			path, count, buf);
	} else if (!count && verbose) {
		tst_res(TINFO, "read(%s) = EOF", path);
	} else if (count < 0 && !quiet) {
		tst_res(TINFO | TERRNO, "read(%s)", path);
	}

	SAFE_CLOSE(fd);
}

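/*
 * Worker main loop: pop paths from this worker's queue and read them until
 * the empty stop string is received. SIGTTIN is ignored so that a background
 * read from a terminal device does not stop the worker.
 */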
static int worker_run(struct worker *self)
{
	char buf[BUFFER_SIZE];
	struct sigaction term_sa = {
		.sa_handler = SIG_IGN,
		.sa_flags = 0,
	};
	struct queue *q = self->q;

	sigaction(SIGTTIN, &term_sa, NULL);

	while (1) {
		if (!queue_pop(q, buf))
			break;

		read_test(buf);
	}

	queue_destroy(q, 1);
	tst_flush();
	return 0;
}

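/*
 * If -p was passed, clear the supplementary groups and switch to the nobody
 * user. EPERM is tolerated so the test can still run unprivileged.
 */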
static void maybe_drop_privs(void)
{
	struct passwd *nobody;

	if (!drop_privs)
		return;

	TEST(setgroups(0, NULL));
	if (TST_RET < 0 && TST_ERR != EPERM) {
		tst_brk(TBROK | TTERRNO,
			"Failed to clear supplementary group set");
	}

	nobody = SAFE_GETPWNAM("nobody");

	TEST(setgid(nobody->pw_gid));
	if (TST_RET < 0 && TST_ERR != EPERM)
		tst_brk(TBROK | TTERRNO, "Failed to use nobody gid");

	TEST(setuid(nobody->pw_uid));
	if (TST_RET < 0 && TST_ERR != EPERM)
		tst_brk(TBROK | TTERRNO, "Failed to use nobody uid");
}

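/* Fork one worker per queue; each child optionally drops privileges first. */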
static void spawn_workers(void)
{
	int i;
	struct worker *wa = workers;

	memset(workers, 0, worker_count * sizeof(*workers));

	for (i = 0; i < worker_count; i++) {
		wa[i].q = queue_init();
		wa[i].pid = SAFE_FORK();
		if (!wa[i].pid) {
			maybe_drop_privs();
			exit(worker_run(wa + i));
		}
	}
}

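/*
 * Push buf to a worker queue, retrying with increasing sleeps until a push
 * succeeds. A negative worker number means any worker may take the work,
 * starting from -worker; a non-negative number pins the work to that worker.
 */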
static void work_push_retry(int worker, const char *buf)
{
	int i, ret, worker_min, worker_max, usleep_time = 100;

	if (worker < 0) {
		/* pick any, try -worker first */
		worker_min = worker * (-1);
		worker_max = worker_count;
	} else {
		/* keep trying worker */
		worker_min = worker;
		worker_max = worker + 1;
	}
	i = worker_min;

	for (;;) {
		ret = queue_push(workers[i].q, buf);
		if (ret == 1)
			break;

		if (++i >= worker_max) {
			i = worker_min;
			if (usleep_time < 100000)
				usleep_time *= 2;
			usleep(usleep_time);
		}
	}
}

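/*
 * Send each worker the empty stop string, then unmap the queues on the
 * parent's side.
 */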
static void stop_workers(void)
{
	const char stop_code[1] = { '\0' };
	int i;

	if (!workers)
		return;

	for (i = 0; i < worker_count; i++) {
		if (workers[i].q)
			work_push_retry(i, stop_code);
	}

	for (i = 0; i < worker_count; i++) {
		if (workers[i].q) {
			queue_destroy(workers[i].q, 0);
			workers[i].q = 0;
		}
	}
}

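/*
 * Schedule a path for reading rep times, spreading the copies across the
 * workers.
 */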
static void rep_sched_work(const char *path, int rep)
{
	int i, j;

	for (i = j = 0; i < rep; i++, j++) {
		if (j >= worker_count)
			j = 0;
		work_push_retry(-j, path);
	}
}

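/*
 * Parse and validate the option strings. Unless -W was given, the worker
 * count is derived from the number of online CPUs and capped by the -w limit.
 */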
static void setup(void)
{
	if (tst_parse_int(str_reads, &reads, 1, INT_MAX))
		tst_brk(TBROK,
			"Invalid reads (-r) argument: '%s'", str_reads);

	if (tst_parse_long(str_max_workers, &max_workers, 1, LONG_MAX)) {
		tst_brk(TBROK,
			"Invalid max workers (-w) argument: '%s'",
			str_max_workers);
	}

	if (tst_parse_long(str_worker_count, &worker_count, 1, LONG_MAX)) {
		tst_brk(TBROK,
			"Invalid worker count (-W) argument: '%s'",
			str_worker_count);
	}

	if (!root_dir)
		tst_brk(TBROK, "The directory argument (-d) is required");

	if (!worker_count)
		worker_count = MIN(MAX(tst_ncpus() - 1, 1), max_workers);
	workers = SAFE_MALLOC(worker_count * sizeof(*workers));
}

static void cleanup(void)
{
	stop_workers();
	free(workers);
}

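/*
 * Recursively walk the directory tree, scheduling regular files for reading
 * and descending into subdirectories. Symlinks are ignored; entries whose
 * type is not reported by readdir() are classified with lstat().
 */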
static void visit_dir(const char *path)
{
	DIR *dir;
	struct dirent *dent;
	struct stat dent_st;
	char dent_path[MAX_PATH];
	enum dent_action act;

	dir = opendir(path);
	if (!dir) {
		tst_res(TINFO | TERRNO, "opendir(%s)", path);
		return;
	}

	while (1) {
		errno = 0;
		dent = readdir(dir);
		if (!dent && errno) {
			tst_res(TINFO | TERRNO, "readdir(%s)", path);
			break;
		} else if (!dent) {
			break;
		}

		if (!strcmp(dent->d_name, ".") ||
		    !strcmp(dent->d_name, ".."))
			continue;

		if (dent->d_type == DT_DIR)
			act = DA_VISIT;
		else if (dent->d_type == DT_LNK)
			act = DA_IGNORE;
		else if (dent->d_type == DT_UNKNOWN)
			act = DA_UNKNOWN;
		else
			act = DA_READ;

		snprintf(dent_path, MAX_PATH,
			 "%s/%s", path, dent->d_name);

		if (act == DA_UNKNOWN) {
			if (lstat(dent_path, &dent_st))
				tst_res(TINFO | TERRNO, "lstat(%s)", dent_path);
			else if ((dent_st.st_mode & S_IFMT) == S_IFDIR)
				act = DA_VISIT;
			else if ((dent_st.st_mode & S_IFMT) == S_IFLNK)
				act = DA_IGNORE;
			else
				act = DA_READ;
		}

		if (act == DA_VISIT)
			visit_dir(dent_path);
		else if (act == DA_READ)
			rep_sched_work(dent_path, reads);
	}

	if (closedir(dir))
		tst_res(TINFO | TERRNO, "closedir(%s)", path);
}

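/* Scan the tree once, then stop the workers and wait for them to exit. */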
static void run(void)
{
	spawn_workers();
	visit_dir(root_dir);
	stop_workers();

	tst_reap_children();
	tst_res(TPASS, "Finished reading files");
}

static struct tst_test test = {
	.options = options,
	.setup = setup,
	.cleanup = cleanup,
	.test_all = run,
	.forks_child = 1,
};