/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 01 - internal entry
 * 10 - exceptional entry
 * 11 - this bit combination is currently unused/reserved
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that you cannot store NULL in the tree as a value for the index.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	1UL

/*
 * Most users of the radix tree store pointers but shmem/tmpfs stores swap
 * entries in the same tree.  They are marked as exceptional entries to
 * distinguish them from pointers to struct page.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2

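/*
 * Example (illustrative sketch, not part of the upstream API): a user such
 * as shmem can pack a small integer into an exceptional entry by shifting
 * the value past the tag bits and setting RADIX_TREE_EXCEPTIONAL_ENTRY.
 * These helper names are hypothetical.
 */
static inline void *radix_tree_example_make_exceptional(unsigned long value)
{
	return (void *)((value << RADIX_TREE_EXCEPTIONAL_SHIFT) |
			RADIX_TREE_EXCEPTIONAL_ENTRY);
}

static inline unsigned long radix_tree_example_exceptional_value(void *entry)
{
	/* shifting right discards the two tag bits, recovering the value */
	return (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
}
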
static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

#ifndef RADIX_TREE_MAP_SHIFT
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

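/*
 * For example, with RADIX_TREE_MAP_SHIFT == 6 on a 64-bit machine, each
 * node covers 64 slots and RADIX_TREE_MAX_PATH == DIV_ROUND_UP(64, 6) == 11,
 * i.e. at most 11 levels are needed to span the full unsigned long index
 * space.
 */
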
/* Internally used bits of node->count */
#define RADIX_TREE_COUNT_SHIFT	(RADIX_TREE_MAP_SHIFT + 1)
#define RADIX_TREE_COUNT_MASK	((1UL << RADIX_TREE_COUNT_SHIFT) - 1)

struct radix_tree_node {
	unsigned char	shift;	/* Bits remaining in each slot */
	unsigned char	offset;	/* Slot offset in parent */
	unsigned int	count;
	union {
		struct {
			/* Used when ascending tree */
			struct radix_tree_node *parent;
			/* For tree user */
			void *private_data;
		};
		/* Used when freeing node */
		struct rcu_head	rcu_head;
	};
	/* For tree user */
	struct list_head private_list;
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};

/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(mask)	{					\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)

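/*
 * Example (usage sketch with hypothetical names): a tree can be defined
 * and initialized statically with RADIX_TREE(), or embedded in another
 * structure and initialized at run time with INIT_RADIX_TREE().  The mask
 * (GFP_KERNEL here, assuming <linux/gfp.h> is visible) seeds the allocation
 * mask used when internal nodes are allocated.
 */
#if 0
static RADIX_TREE(example_tree, GFP_KERNEL);	/* static definition */

static void example_init(struct radix_tree_root *root)
{
	INIT_RADIX_TREE(root, GFP_KERNEL);	/* run-time initialization */
}
#endif
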
static inline bool radix_tree_empty(struct radix_tree_root *root)
{
	return root->rnode == NULL;
}

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes, must be completely handled by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions are able to be called locklessly, using RCU. The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions. Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and lifetimes
 * of the items. So if RCU lock-free lookups are used, typically this would mean
 * that the items have their own locks, or are amenable to lock-free access; and
 * that the items are freed by RCU (or only freed after having been deleted from
 * the radix tree *and* a synchronize_rcu() grace period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */

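/*
 * Example (usage sketch): a lockless lookup under RCU.  The caller must
 * still guarantee the item's lifetime, e.g. by taking a reference on it
 * inside the rcu_read_lock() section before using it after
 * rcu_read_unlock().  Kept under #if 0 because radix_tree_lookup() is only
 * declared further down in this header.
 */
#if 0
static void *example_rcu_lookup(struct radix_tree_root *root,
				unsigned long index)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(root, index);
	rcu_read_unlock();
	return item;
}
#endif
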
/**
 * radix_tree_deref_slot	- dereference a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 */
static inline void *radix_tree_deref_slot(void **pslot)
{
	return rcu_dereference(*pslot);
}

/**
 * radix_tree_deref_slot_protected	- dereference a slot without RCU lock but with tree lock held
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * Similar to radix_tree_deref_slot but only used during migration when a
 * page's mapping is being moved. The caller does not hold the RCU read lock
 * but must hold the tree lock to prevent parallel updates.
 */
static inline void *radix_tree_deref_slot_protected(void **pslot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry	- check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}

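/*
 * Example (usage sketch, mirroring the pattern used by page-cache lookups):
 * under RCU a slot may briefly contain an internal entry while the tree is
 * being restructured, in which case the dereference must be retried.  Kept
 * under #if 0 because radix_tree_lookup_slot() is only declared further down.
 */
#if 0
static void *example_lookup_with_retry(struct radix_tree_root *root,
				       unsigned long index)
{
	void **slot;
	void *item;

	rcu_read_lock();
repeat:
	item = NULL;
	slot = radix_tree_lookup_slot(root, index);
	if (slot) {
		item = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(item))
			goto repeat;
	}
	rcu_read_unlock();
	return item;
}
#endif
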
/**
 * radix_tree_exceptional_entry	- radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception	- radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

/**
 * radix_tree_replace_slot	- replace item in a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
	BUG_ON(radix_tree_is_internal_node(item));
	rcu_assign_pointer(*pslot, item);
}

int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
			unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
			unsigned long index, void *entry)
{
	return __radix_tree_insert(root, index, 0, entry);
}
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *root,
			   struct radix_tree_node *node,
			   void **slot);
unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
			void **results, unsigned long first_index,
			unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}

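/*
 * Example (usage sketch): radix_tree_preload() pre-allocates enough nodes
 * for one insertion and, on success, disables preemption, so the insertion
 * under a spinlock cannot fail with -ENOMEM (it may still return -EEXIST).
 * 'lock' is a hypothetical tree lock; GFP_KERNEL and the spinlock API are
 * assumed visible here.
 */
#if 0
static int example_insert(struct radix_tree_root *root, unsigned long index,
			  void *item, spinlock_t *lock)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* may sleep */
	if (err)
		return err;			/* preemption not disabled */

	spin_lock(lock);
	err = radix_tree_insert(root, index, item);
	spin_unlock(lock);

	radix_tree_preload_end();		/* re-enables preemption */
	return err;
}
#endif
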
/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @shift:	shift for the node that holds our slots
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	unsigned int	shift;
#endif
};

static inline unsigned int iter_shift(struct radix_tree_iter *iter)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	return iter->shift;
#else
	return 0;
#endif
}

#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup.  If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and constructs a bit mask for tagged
 * iterating (tags).
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + (slots << iter_shift(iter));
}

/**
 * radix_tree_iter_next - resume iterating when the chunk may be invalid
 * @iter:	iterator state
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * to continue the iteration from the next index.
 */
static inline __must_check
void **radix_tree_iter_next(struct radix_tree_iter *iter)
{
	iter->next_index = __radix_tree_iter_add(iter, 1);
	iter->tags = 0;
	return NULL;
}

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return (iter->next_index - iter->index) >> iter_shift(iter);
}

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_next() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		void *canon = slot;

		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
					radix_tree_is_internal_node(slot[1])) {
			if (entry_to_node(slot[1]) == canon) {
				iter->tags >>= 1;
				iter->index = __radix_tree_iter_add(iter, 1);
				slot++;
				continue;
			}
			iter->next_index = __radix_tree_iter_add(iter, 1);
			return NULL;
		}
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			return slot + 1;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset;
			iter->index = __radix_tree_iter_add(iter, offset + 1);
			return slot + offset + 1;
		}
	} else {
		long count = radix_tree_chunk_size(iter);
		void *canon = slot;

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
			    radix_tree_is_internal_node(*slot)) {
				if (entry_to_node(*slot) == canon)
					continue;
				iter->next_index = iter->index;
				break;
			}

			if (likely(*slot))
				return slot;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))

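/*
 * Example (usage sketch): iterating over all entries under RCU, using the
 * retry pattern described at radix_tree_iter_retry().  process() is a
 * hypothetical consumer.
 */
#if 0
static void example_walk(struct radix_tree_root *root)
{
	void **slot;
	struct radix_tree_iter iter;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		void *item = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(item)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		process(iter.index, item);
	}
	rcu_read_unlock();
}
#endif
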
/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED))

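/*
 * Example (usage sketch): tag an entry under the tree lock, then walk only
 * the slots carrying that tag under RCU.  'root', 'lock' and 'index' are
 * hypothetical; the tag must be below RADIX_TREE_MAX_TAGS.
 */
#if 0
static void example_tagged_walk(struct radix_tree_root *root,
				spinlock_t *lock, unsigned long index)
{
	void **slot;
	struct radix_tree_iter iter;

	spin_lock(lock);
	radix_tree_tag_set(root, index, 0);	/* modifications need exclusion */
	spin_unlock(lock);

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, root, &iter, 0, 0) {
		void *item = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(item)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		/* iter.index is the index of the tagged entry 'item' */
	}
	rcu_read_unlock();
}
#endif
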
#endif /* _LINUX_RADIX_TREE_H */