/* include/linux/rculist.h */
1 #ifndef _LINUX_RCULIST_H
2 #define _LINUX_RCULIST_H
3 
4 #ifdef __KERNEL__
5 
6 /*
7  * RCU-protected list version
8  */
9 #include <linux/list.h>
10 #include <linux/rcupdate.h>
11 
/*
 * Why is there no list_empty_rcu()?  Because list_empty() serves this
 * purpose.  The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller.  Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */
20 
/*
 * Return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly; the __rcu cast lets
 * sparse check that the result is only used via the RCU API.
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))
26 
27 /*
28  * Insert a new entry between two known consecutive entries.
29  *
30  * This is only for internal list manipulation where we know
31  * the prev/next entries already!
32  */
#ifndef CONFIG_DEBUG_LIST
/*
 * Insert @new between two known consecutive entries @prev and @next.
 * Internal helper: callers must already hold whatever lock serializes
 * updates to this list.
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	/*
	 * Fully initialize @new before publishing it: rcu_assign_pointer()
	 * supplies the memory barrier that makes the initialized node
	 * visible to concurrent readers following ->next pointers.
	 */
	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;	/* RCU readers do not traverse ->prev */
}
#else
extern void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next);
#endif
46 
/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}
67 
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}
89 
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	/* Poison only ->prev: readers may still be walking via ->next. */
	entry->prev = LIST_POISON2;
}
119 
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node return true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		n->pprev = NULL;	/* makes hlist_unhashed() true */
	}
}
147 
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	/* Initialize @new fully before publishing it via rcu_assign_pointer(). */
	new->next = old->next;
	new->prev = old->prev;
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	/* @old may still be referenced by readers; poison ->prev only. */
	old->prev = LIST_POISON2;
}
165 
/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the list to splice the first list into
 * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be RCU-read traversed concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 *	prevent any other updates to @head.  In principle, it is possible
 *	to modify the list as soon as sync() begins execution.
 *	If this sort of thing becomes necessary, an alternative version
 *	based on call_rcu() could be created.  But only if -really-
 *	needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	if (list_empty(list))
		return;

	/* "first" and "last" tracking list, so initialize it. */

	INIT_LIST_HEAD(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */

	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = at;
	rcu_assign_pointer(list_next_rcu(head), first);
	first->prev = head;
	at->prev = last;
}
220 
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_struct within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 *
 * The __rcu/__force casts satisfy sparse; rcu_dereference_raw() provides
 * the dependency-ordered load of the pointer itself.
 */
#define list_entry_rcu(ptr, type, member) \
	({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
	 container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
	})
234 
/*
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 * if (!list_empty_rcu(mylist)) {
 *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *	do_something(bar);
 * }
 *
 * The list may not be empty when list_empty_rcu checks it, but it may be when
 * list_first_entry_rcu rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu for an alternative.
 */
256 
/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_struct within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 *
 * ->next must be fetched exactly once (ACCESS_ONCE()) so that the
 * emptiness check and the entry returned refer to the same pointer value;
 * going through list_entry_rcu() supplies the rcu_dereference() needed
 * before the caller dereferences the entry.
 */
#define list_first_or_null_rcu(ptr, type, member) \
	({struct list_head *__ptr = (ptr); \
	  struct list_head *__next = ACCESS_ONCE(__ptr->next); \
	  likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
	})
273 
/**
 * list_for_each_entry_rcu	-	iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
		&pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
288 
/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) 		\
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);	\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
302 
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* Poison only ->pprev: ->next may still be walked by readers. */
	n->pprev = LIST_POISON2;
}
327 
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	/* Initialize @new fully, then publish it through *old->pprev. */
	new->next = next;
	new->pprev = old->pprev;
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	old->pprev = LIST_POISON2;
}
347 
/*
 * Return the first or the next element in an RCU protected hlist,
 * as __rcu-annotated lvalues suitable for rcu_dereference()/
 * rcu_assign_pointer().
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))
354 
/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	/* Initialize @n before rcu_assign_pointer() publishes it. */
	n->next = first;
	n->pprev = &h->first;
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		first->pprev = &n->next;
}
385 
/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	/* Initialize @n, then publish it through *next->pprev. */
	n->pprev = next->pprev;
	n->next = next;
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	next->pprev = &n->next;
}
412 
/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
				       struct hlist_node *n)
{
	/* Initialize @n before rcu_assign_pointer() publishes it. */
	n->next = prev->next;
	n->pprev = &prev->next;
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		n->next->pprev = &n->next;
}
440 
/*
 * Iterate over the raw nodes of an RCU-protected hlist; the caller
 * must be within an RCU read-side critical section.
 */
#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))
445 
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member)			\
	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
462 
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)			\
	for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
482 
/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
499 
/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
			typeof(*(pos)), member))
511 
/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
			typeof(*(pos)), member))
523 
524 
525 #endif	/* __KERNEL__ */
526 #endif
527