// SPDX-License-Identifier: LGPL-2.1
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>

#include "../kselftest.h"
#include "rseq.h"

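/*
 * Depending on the build option below, per-cpu data is indexed either
 * by the memory map concurrency ID (mm_cid) or by the CPU number
 * reported by rseq.
 */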
#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID
static
int get_current_cpu_id(void)
{
	return rseq_current_mm_cid();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_mm_cid_available();
}
#else
# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
static
int get_current_cpu_id(void)
{
	return rseq_cpu_start();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_current_cpu_raw() >= 0;
}
#endif

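/*
 * Per-cpu slots are aligned to 128 bytes so that entries belonging to
 * different CPUs do not share a cache line, avoiding false sharing
 * (128 bytes is assumed to cover adjacent cache-line prefetching on
 * common hardware).
 */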
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};

struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};

struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};

/* A simple percpu spinlock. Returns the cpu lock was acquired on. */
int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = get_current_cpu_id();
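		/*
		 * Within an rseq critical section tied to @cpu, compare the
		 * per-cpu lock word against 0 (unlocked) and, if it matches,
		 * store 1 to take the lock. A non-zero return means the
		 * comparison failed (lock already held) or the critical
		 * section was aborted, so we retry.
		 */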
		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					 &lock->c[cpu].v, 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}

void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}
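
/*
 * Typical usage, as in test_percpu_spinlock_thread() below:
 *
 *	cpu = rseq_this_cpu_lock(&data->lock);
 *	data->c[cpu].count++;
 *	rseq_percpu_unlock(&data->lock, cpu);
 *
 * The cpu index returned by rseq_this_cpu_lock() identifies the per-cpu
 * slot that was locked and must be passed back to rseq_percpu_unlock().
 */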

void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_test_data *data = arg;
	int i, cpu;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < data->reps; i++) {
		cpu = rseq_this_cpu_lock(&data->lock);
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock. Obviously real applications might prefer to simply use a
 * per-cpu increment; however, this is reasonable for a test and the
 * lock can be extended to synchronize more complicated operations.
 */
void test_percpu_spinlock(void)
{
	const int num_threads = 200;
	int i;
	uint64_t sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;

	memset(&data, 0, sizeof(data));
	data.reps = 5000;

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_spinlock_thread, &data);

	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	assert(sum == (uint64_t)data.reps * num_threads);
}

void this_cpu_list_push(struct percpu_list *list,
			struct percpu_list_node *node,
			int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = get_current_cpu_id();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
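		/*
		 * Publish the node only if the head has not changed since it
		 * was read above; the comparison and store happen within an
		 * rseq critical section tied to @cpu. On comparison failure
		 * or abort, retry with a fresh snapshot of the head.
		 */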
		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					 targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}

/*
 * Unlike a traditional lock-less linked list, the availability of an
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
					   int *_cpu)
{
	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		long offset;
		int ret, cpu;

		cpu = get_current_cpu_id();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
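		/*
		 * In one rseq critical section: if the head is not NULL, load
		 * it into @head and replace list->c[cpu].head with head->next
		 * (found at @offset within the node). Because the dereference
		 * of the old head and the update of the head pointer cannot be
		 * interleaved with another pop on this cpu, ABA reuse of the
		 * node is not a concern.
		 */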
		ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU,
						 targetptr, expectnot,
						 offset, load, cpu);
		if (rseq_likely(!ret)) {
			if (_cpu)
				*_cpu = cpu;
			return head;
		}
		if (ret > 0)
			return NULL;
		/* Retry if rseq aborts. */
	}
}

/*
 * __percpu_list_pop is not safe against concurrent accesses. Should
 * only be used on lists that are not concurrently modified.
 */
struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *node;

	node = list->c[cpu].head;
	if (!node)
		return NULL;
	list->c[cpu].head = node->next;
	return node;
}

void *test_percpu_list_thread(void *arg)
{
	int i;
	struct percpu_list *list = (struct percpu_list *)arg;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	for (i = 0; i < 100000; i++) {
		struct percpu_list_node *node;

		node = this_cpu_list_pop(list, NULL);
		sched_yield(); /* encourage shuffling */
		if (node)
			this_cpu_list_push(list, node, NULL);
	}

	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/* Simultaneous modification to a per-cpu linked list from many threads. */
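/*
 * Each thread repeatedly pops a node from the list of the cpu it is
 * currently running on and pushes it back, possibly onto another cpu's
 * list after a migration. The test then verifies that the sum of
 * node->data across all per-cpu lists is conserved.
 */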
void test_percpu_list(void)
{
	int i, j;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[200];
	cpu_set_t allowed_cpus;

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < 200; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_list_thread, &list);

	for (i = 0; i < 200; i++)
		pthread_join(test_threads[i], NULL);

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (!CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = __percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	assert(sum == expected_sum);
}

int main(int argc, char **argv)
{
	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	if (!rseq_validate_cpu_id()) {
		fprintf(stderr, "Error: cpu id getter unavailable\n");
		goto error;
	}
	printf("spinlock\n");
	test_percpu_spinlock();
	printf("percpu_list\n");
	test_percpu_list();
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	return 0;

error:
	return -1;
}