// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017 Richard Palethorpe <rpalethorpe@suse.com>
 */

/*
 * A basic regression test for tst_atomic_{load,store}. Also provides a
 * limited check that atomic stores and loads order non-atomic memory
 * accesses. That is, we are checking that they implement memory fences or
 * barriers.
 *
 * Many architectures/machines will still pass the test even if you remove the
 * atomic functions. x86 in particular has strong memory ordering by default,
 * so it should always pass (if you use volatile). However, AArch64
 * (Raspberry Pi 3 Model B) has been observed to fail without the atomic
 * functions.
 *
 * A failure can occur if an update to seq_n is not made globally visible by
 * the time the next thread needs to use it.
 */

#include <stdint.h>
#include <pthread.h>
#include "tst_test.h"
#include "tst_atomic.h"

#define THREADS 64
#define FILLER (1 << 20)
/* Uncomment these to see what happens without atomics. To prevent the compiler
 * from removing/reordering accesses to atomic and seq_n, mark them as volatile.
 */
/* #define tst_atomic_load(v) (*(v)) */
/* #define tst_atomic_store(i, v) *(v) = (i) */

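/* Each worker records its id in one of these blocks. The large filler array
 * keeps the blocks far apart in memory and gives mem_spam() something to
 * write to.
 */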
struct block {
	int seq_n;
	intptr_t id;
	intptr_t filler[FILLER];
};

static int atomic;
/* Instead of storing seq_n on the stack (probably next to the atomic variable
 * above), we store it in the middle of some anonymous mapped memory and keep
 * a pointer to it. This should decrease the probability that the value of
 * seq_n will be synchronised between processors as a byproduct of the atomic
 * variable being updated.
 */
static int *seq_n;
static struct block *m;

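/* Each worker spins until atomic equals its own id, records its id in the
 * block selected by seq_n, increments seq_n (non-atomically) and then hands
 * the turn to the next worker by storing id + 1 in atomic.
 */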
static void *worker_load_store(void *aid)
{
	int id = (intptr_t)aid, i;

	for (i = tst_atomic_load(&atomic);
	     i != id;
	     i = tst_atomic_load(&atomic))
		;

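	/* Neither of these writes is atomic; the store to atomic below has
	 * to act as a barrier so that the next worker sees the new seq_n.
	 */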
	(m + (*seq_n))->id = id;
	*seq_n += 1;
	tst_atomic_store(i + 1, &atomic);

	return NULL;
}

/* Attempt to stress the memory transport so that memory operations are
 * contended and less predictable. This should increase the likelihood of a
 * failure if a memory fence is missing.
 */
static void *mem_spam(void *vp LTP_ATTRIBUTE_UNUSED)
{
	intptr_t i = 0, j;
	struct block *cur = m;

	tst_res(TINFO, "Memory spammer started");
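	/* Cycle through the blocks, rewriting their filler arrays, until
	 * atomic is no longer positive (do_test() stores -1 when the
	 * workers have finished).
	 */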
	while (tst_atomic_load(&atomic) > 0) {
		for (j = 0; j < FILLER; j++)
			cur->filler[j] = j;

		if (i < THREADS - 1) {
			cur = m + (++i);
		} else {
			i = 0;
			cur = m;
		}
	}

	return NULL;
}

static void do_test(void)
{
	intptr_t i, id;
	pthread_t threads[THREADS + 1];

	atomic = 0;
	m = SAFE_MMAP(NULL, sizeof(*m) * THREADS,
		      PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS,
		      -1, 0);
	seq_n = &((m + THREADS / 2)->seq_n);

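	/* threads[THREADS] is the memory spammer; threads[0..THREADS - 1]
	 * are the workers, which are created in reverse order.
	 */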
	pthread_create(&threads[THREADS], NULL, mem_spam, NULL);
	for (i = THREADS - 1; i >= 0; i--)
		pthread_create(&threads[i], NULL, worker_load_store, (void *)i);

	for (i = 0; i < THREADS; i++) {
		tst_res(TINFO, "Joining thread %li", i);
		pthread_join(threads[i], NULL);
	}
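	/* All workers are done; tell mem_spam() to stop. */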
	tst_atomic_store(-1, &atomic);
	pthread_join(threads[THREADS], NULL);

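	/* If every seq_n update was visible to the next worker in time,
	 * then block i was claimed by worker i.
	 */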
	tst_res(TINFO, "Expected\tFound");
	for (i = 0; i < THREADS; i++) {
		id = (m + i)->id;
		if (id != i)
			tst_res(TFAIL, "%d\t\t%d", (int)i, (int)id);
		else
			tst_res(TPASS, "%d\t\t%d", (int)i, (int)id);
	}

	SAFE_MUNMAP(m, sizeof(*m) * THREADS);
}

static struct tst_test test = {
	.test_all = do_test,
};