// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 * Copyright (c) 2023 ByteDance
 * Author: Peng Zhang <zhangpeng.00@bytedance.com>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots. In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys. The term pivot is used to
 * indicate that the tree is specifying ranges. Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree. Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset. All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities. When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted. When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
 *
 */
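
/*
 * Illustration (hypothetical values): storing 0x1000-0x1fff -> A and
 * 0x2000-0x3fff -> B in a leaf node spanning 0x1000-0x3fff gives:
 *
 *	slot[0] = A, pivot[0] = 0x1fff	(range 0x1000-0x1fff)
 *	slot[1] = B			(range 0x2000-0x3fff, maximum implied)
 *
 * The minimum of slot 0 comes from the node's minimum, and the last range
 * ends at the node's implied maximum.
 */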


#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE	- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};
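
/*
 * A note on the union above (design inference, not stated in the original):
 * the padding places the gap array past the slots that a maple_arange_64 big
 * node can actually use, so child slots and their gaps can coexist in the
 * one buffer while a big node is being assembled.
 */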

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way. Any walkers of the older tree will hit a
 * dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_one(struct maple_node *node)
{
	kmem_cache_free(maple_node_cache, node);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline unsigned int mt_attr(struct maple_tree *mt)
{
	return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static __always_inline bool mas_is_overflow(struct ma_state *mas)
{
	if (unlikely(mas->node == MAS_OVERFLOW))
		return true;

	return false;
}

static __always_inline bool mas_is_underflow(struct ma_state *mas)
{
	if (unlikely(mas->node == MAS_UNDERFLOW))
		return true;

	return false;
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type is stored in bits 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16
 * bit values need an extra bit to store the offset. This extra bit comes from
 * a reuse of the last bit in the node type. This is possible by using bit 1 to
 * indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */
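
/*
 * Worked example (hypothetical addresses): a node living in slot 3 of a
 * maple_range_64 parent at 0x...4200 stores
 *
 *	0x...4200 | (3 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
 *	= 0x...4200 | 0x18 | 0x06 = 0x...421e
 *
 * as its parent pointer. Decoding reverses this: the low bits 0b110 identify
 * a 64 bit range parent, (0x1e & MAPLE_PARENT_SLOT_MASK) >> 3 recovers slot
 * 3, and clearing MAPLE_NODE_MASK recovers the parent node address.
 */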

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT	0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot in @parent where @enode resides.
 *
 * Slot number is encoded in the enode->parent bits 3-6 or 2-6, depending on the
 * parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate. If bit 0 is
 * set, then the alloc contains the number of requested nodes. If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node. Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated. Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}
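
/*
 * Example of the encoding (values hypothetical): after mas_set_alloc_req()
 * with a count of 3 and no allocated nodes, mas->alloc holds
 * (3 << 1) | 1 = 0x7, so mas_allocated() reports 0 while mas_alloc_req()
 * reports 3. Once a node has been allocated, mas->alloc points at it and
 * both counts live in that node (alloc->total and alloc->request_count).
 */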

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

693
694 /*
695 * mas_pivot() - Get the pivot at @piv of the maple encoded node.
696 * @mas: The maple state.
697 * @piv: The pivot.
698 *
699 * Return: the pivot at @piv of @mn.
700 */
mas_pivot(struct ma_state * mas,unsigned char piv)701 static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
702 {
703 struct maple_node *node = mas_mn(mas);
704 enum maple_type type = mte_node_type(mas->node);
705
706 if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
707 mas_set_err(mas, -EIO);
708 return 0;
709 }
710
711 switch (type) {
712 case maple_arange_64:
713 return node->ma64.pivot[piv];
714 case maple_range_64:
715 case maple_leaf_64:
716 return node->mr64.pivot[piv];
717 case maple_dense:
718 return 0;
719 }
720 return 0;
721 }
722
723 /*
724 * mas_safe_pivot() - get the pivot at @piv or mas->max.
725 * @mas: The maple state
726 * @pivots: The pointer to the maple node pivots
727 * @piv: The pivot to fetch
728 * @type: The maple node type
729 *
730 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
731 * otherwise.
732 */
733 static inline unsigned long
mas_safe_pivot(const struct ma_state * mas,unsigned long * pivots,unsigned char piv,enum maple_type type)734 mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
735 unsigned char piv, enum maple_type type)
736 {
737 if (piv >= mt_pivots[type])
738 return mas->max;
739
740 return pivots[piv];
741 }
742
743 /*
744 * mas_safe_min() - Return the minimum for a given offset.
745 * @mas: The maple state
746 * @pivots: The pointer to the maple node pivots
747 * @offset: The offset into the pivot array
748 *
749 * Return: The minimum range value that is contained in @offset.
750 */
751 static inline unsigned long
mas_safe_min(struct ma_state * mas,unsigned long * pivots,unsigned char offset)752 mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
753 {
754 if (likely(offset))
755 return pivots[offset - 1] + 1;
756
757 return mas->min;
758 }
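
/*
 * For example (hypothetical node): with pivots {10, 20, 30} and mas->min = 0,
 * offset 0 covers 0-10 and offset 1 covers 11-20 (mas_safe_min() returns
 * pivots[0] + 1 = 11), while an offset past the pivot array is bounded by
 * mas->max via mas_safe_pivot().
 */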

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_write_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
			    void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
				   unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mt_free_walk(struct rcu_head *head);
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
			    bool free);
/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;
	struct maple_node *node;
	bool in_rcu = mt_in_rcu(mas->tree);

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		node = mte_to_node(mat->head);
		mt_destroy_walk(mat->head, mas->tree, !in_rcu);
		if (in_rcu)
			call_rcu(&node->rcu, mt_free_walk);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up. This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	if (!mas->min)
		set_min = true;

	if (mas->max == ULONG_MAX)
		set_max = true;

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}
	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
 * requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}
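
/*
 * For example (assuming a 16 slot range64 node, so mt_pivots[] == 15): if
 * pivot[14] is zero the data ended early and the end is read from the
 * metadata; if pivot[14] equals the node maximum then offset 14 is the last
 * slot with data; otherwise every pivot is in use and slot 15 holds the
 * final entry.
 */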

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}
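
/*
 * For example (hypothetical leaf): min = 0, max = 100, with
 * pivot[0] = 9, slot[0] = A; pivot[1] = 49, slot[1] = NULL;
 * pivot[2] = 100, slot[2] = B. The only gap is the NULL range 10-49,
 * so mas_leaf_max_gap() returns 40.
 */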

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	MAS_BUG_ON(mas, mt != maple_arange_64);
	offset = ma_meta_gap(node, mt);
	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	MAS_BUG_ON(mas, pmt != maple_arange_64);
	meta_offset = ma_meta_gap(pnode, pmt);
	meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mte_to_node(parent);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mas_set_parent(mas, child, parent, offset);
	} while (offset--);
}

/*
 * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
 * node as dead.
 * @mas: the maple state with the new node
 * @old_enode: The old maple encoded node to replace.
 */
static inline void mas_put_in_tree(struct ma_state *mas,
				   struct maple_enode *old_enode)
	__must_hold(mas->tree->ma_lock)
{
	unsigned char offset;
	void __rcu **slots;

	if (mte_is_root(mas->node)) {
		mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		rcu_assign_pointer(slots[offset], mas->node);
	}

	mte_set_node_dead(old_enode);
}

/*
 * mas_replace_node() - Replace a node by putting it in the tree, marking it
 * dead, and freeing it, using the parent encoding to locate the maple node in
 * the tree.
 * @mas: the ma_state with @mas->node pointing to the new node.
 * @old_enode: The old maple encoded node.
 */
static inline void mas_replace_node(struct ma_state *mas,
				    struct maple_enode *old_enode)
	__must_hold(mas->tree->ma_lock)
{
	mas_put_in_tree(mas, old_enode);
	mas_free(mas, old_enode);
}
1775
1776 /*
1777 * mas_find_child() - Find a child who has the parent @mas->node.
1778 * @mas: the maple state with the parent.
1779 * @child: the maple state to store the child.
1780 */
mas_find_child(struct ma_state * mas,struct ma_state * child)1781 static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
1782 __must_hold(mas->tree->ma_lock)
1783 {
1784 enum maple_type mt;
1785 unsigned char offset;
1786 unsigned char end;
1787 unsigned long *pivots;
1788 struct maple_enode *entry;
1789 struct maple_node *node;
1790 void __rcu **slots;
1791
1792 mt = mte_node_type(mas->node);
1793 node = mas_mn(mas);
1794 slots = ma_slots(node, mt);
1795 pivots = ma_pivots(node, mt);
1796 end = ma_data_end(node, mt, pivots, mas->max);
1797 for (offset = mas->offset; offset <= end; offset++) {
1798 entry = mas_slot_locked(mas, slots, offset);
1799 if (mte_parent(entry) == node) {
1800 *child = *mas;
1801 mas->offset = offset + 1;
1802 child->offset = offset;
1803 mas_descend(child);
1804 child->offset = 0;
1805 return true;
1806 }
1807 }
1808 return false;
1809 }
1810
1811 /*
1812 * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
1813 * old data or set b_node->b_end.
1814 * @b_node: the maple_big_node
1815 * @shift: the shift count
1816 */
mab_shift_right(struct maple_big_node * b_node,unsigned char shift)1817 static inline void mab_shift_right(struct maple_big_node *b_node,
1818 unsigned char shift)
1819 {
1820 unsigned long size = b_node->b_end * sizeof(unsigned long);
1821
1822 memmove(b_node->pivot + shift, b_node->pivot, size);
1823 memmove(b_node->slot + shift, b_node->slot, size);
1824 if (b_node->type == maple_arange_64)
1825 memmove(b_node->gap + shift, b_node->gap, size);
1826 }
1827
1828 /*
1829 * mab_middle_node() - Check if a middle node is needed (unlikely)
1830 * @b_node: the maple_big_node that contains the data.
1831 * @size: the amount of data in the b_node
1832 * @split: the potential split location
1833 * @slot_count: the size that can be stored in a single node being considered.
1834 *
1835 * Return: true if a middle node is required.
1836 */
1837 static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
1838 unsigned char slot_count)
1839 {
1840 unsigned char size = b_node->b_end;
1841
1842 if (size >= 2 * slot_count)
1843 return true;
1844
1845 if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
1846 return true;
1847
1848 return false;
1849 }
1850
1851 /*
1852 * mab_no_null_split() - ensure the split doesn't fall on a NULL
1853 * @b_node: the maple_big_node with the data
1854 * @split: the suggested split location
1855 * @slot_count: the number of slots in the node being considered.
1856 *
1857 * Return: the split location.
1858 */
1859 static inline int mab_no_null_split(struct maple_big_node *b_node,
1860 unsigned char split, unsigned char slot_count)
1861 {
1862 if (!b_node->slot[split]) {
1863 /*
1864 * If the split is less than the max slot && the right side will
1865 * still be sufficient, then increment the split on NULL.
1866 */
1867 if ((split < slot_count - 1) &&
1868 (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1869 split++;
1870 else
1871 split--;
1872 }
1873 return split;
1874 }
1875
1876 /*
1877 * mab_calc_split() - Calculate the split location and if there needs to be two
1878 * splits.
1879 * @bn: The maple_big_node with the data
1880 * @mid_split: The second split, if required. 0 otherwise.
1881 *
1882 * Return: The first split location. The middle split is set in @mid_split.
1883 */
1884 static inline int mab_calc_split(struct ma_state *mas,
1885 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1886 {
1887 unsigned char b_end = bn->b_end;
1888 int split = b_end / 2; /* Assume equal split. */
1889 unsigned char slot_min, slot_count = mt_slots[bn->type];
1890
1891 /*
1892 * To support gap tracking, all NULL entries are kept together and a node cannot
1893 * end on a NULL entry, with the exception of the left-most leaf. The
1894 * limitation means that the split of a node must be checked for this condition
1895 * and be able to put more data in one direction or the other.
1896 */
1897 if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1898 *mid_split = 0;
1899 split = b_end - mt_min_slots[bn->type];
1900
1901 if (!ma_is_leaf(bn->type))
1902 return split;
1903
1904 mas->mas_flags |= MA_STATE_REBALANCE;
1905 if (!bn->slot[split])
1906 split--;
1907 return split;
1908 }
1909
1910 /*
1911 * Although extremely rare, it is possible to enter what is known as the 3-way
1912 * split scenario. The 3-way split comes about by means of a store of a range
1913 * that overwrites the end and beginning of two full nodes. The result is a set
1914 * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
1915 * also be located in different parent nodes which are also full. This can
1916 * carry upwards all the way to the root in the worst case.
1917 */
1918 if (unlikely(mab_middle_node(bn, split, slot_count))) {
1919 split = b_end / 3;
1920 *mid_split = split * 2;
1921 } else {
1922 slot_min = mt_min_slots[bn->type];
1923
1924 *mid_split = 0;
1925 /*
1926 * Avoid having a range less than the slot count unless it
1927 * causes one node to be deficient.
1928 * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
1929 */
1930 while ((split < slot_count - 1) &&
1931 ((bn->pivot[split] - min) < slot_count - 1) &&
1932 (b_end - split > slot_min))
1933 split++;
1934 }
1935
1936 /* Avoid ending a node on a NULL entry */
1937 split = mab_no_null_split(bn, split, slot_count);
1938
1939 if (unlikely(*mid_split))
1940 *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1941
1942 return split;
1943 }
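
/*
 * Worked example for the split calculation above (values assumed for
 * illustration): a maple_range_64 big node with b_end = 14 takes the equal
 * split of 14 / 2 = 7, provided the ranges are wide enough that the
 * deficiency loop does not move it and slot 7 is not NULL; if it were NULL,
 * mab_no_null_split() would nudge the split to 8.  A big node with
 * b_end = 32 (two full nodes plus an overlapping store) triggers
 * mab_middle_node(), giving split = 32 / 3 = 10 and *mid_split = 20 for a
 * 3-way split.
 */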
1944
1945 /*
1946 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1947 * and set @b_node->b_end to the next free slot.
1948 * @mas: The maple state
1949 * @mas_start: The starting slot to copy
1950 * @mas_end: The end slot to copy (inclusively)
1951 * @b_node: The maple_big_node to place the data
1952 * @mab_start: The starting location in maple_big_node to store the data.
1953 */
1954 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1955 unsigned char mas_end, struct maple_big_node *b_node,
1956 unsigned char mab_start)
1957 {
1958 enum maple_type mt;
1959 struct maple_node *node;
1960 void __rcu **slots;
1961 unsigned long *pivots, *gaps;
1962 int i = mas_start, j = mab_start;
1963 unsigned char piv_end;
1964
1965 node = mas_mn(mas);
1966 mt = mte_node_type(mas->node);
1967 pivots = ma_pivots(node, mt);
1968 if (!i) {
1969 b_node->pivot[j] = pivots[i++];
1970 if (unlikely(i > mas_end))
1971 goto complete;
1972 j++;
1973 }
1974
1975 piv_end = min(mas_end, mt_pivots[mt]);
1976 for (; i < piv_end; i++, j++) {
1977 b_node->pivot[j] = pivots[i];
1978 if (unlikely(!b_node->pivot[j]))
1979 break;
1980
1981 if (unlikely(mas->max == b_node->pivot[j]))
1982 goto complete;
1983 }
1984
1985 if (likely(i <= mas_end))
1986 b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
1987
1988 complete:
1989 b_node->b_end = ++j;
1990 j -= mab_start;
1991 slots = ma_slots(node, mt);
1992 memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
1993 if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
1994 gaps = ma_gaps(node, mt);
1995 memcpy(b_node->gap + mab_start, gaps + mas_start,
1996 sizeof(unsigned long) * j);
1997 }
1998 }
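
/*
 * Sketch of the copy semantics (assumed values): copying offsets 0-4 of a
 * leaf with pivots {10, 20, 30, 40, 50} into an empty big node sets
 * b_node->pivot[0..4] = {10, 20, 30, 40, 50} and b_node->b_end = 5.  The
 * big node is only scratch space; nothing becomes visible to readers until
 * mab_mas_cp() copies the data back into a real node.
 */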
1999
2000 /*
2001 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
2002 * @mas: The maple state
2003 * @node: The maple node
2004 * @pivots: pointer to the maple node pivots
2005 * @mt: The maple type
2006 * @end: The assumed end
2007 *
2008 * Note, end may be incremented within this function but not modified at the
2009 * source. This is fine since the metadata is the last thing to be stored in a
2010 * node during a write.
2011 */
2012 static inline void mas_leaf_set_meta(struct ma_state *mas,
2013 struct maple_node *node, unsigned long *pivots,
2014 enum maple_type mt, unsigned char end)
2015 {
2016 /* There is no room for metadata already */
2017 if (mt_pivots[mt] <= end)
2018 return;
2019
2020 if (pivots[end] && pivots[end] < mas->max)
2021 end++;
2022
2023 if (end < mt_slots[mt] - 1)
2024 ma_set_meta(node, mt, 0, end);
2025 }
2026
2027 /*
2028 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
2029 * @b_node: the maple_big_node that has the data
2030 * @mab_start: the start location in @b_node.
2031 * @mab_end: The end location in @b_node (inclusively)
2032 * @mas: The maple state with the maple encoded node.
2033 */
2034 static inline void mab_mas_cp(struct maple_big_node *b_node,
2035 unsigned char mab_start, unsigned char mab_end,
2036 struct ma_state *mas, bool new_max)
2037 {
2038 int i, j = 0;
2039 enum maple_type mt = mte_node_type(mas->node);
2040 struct maple_node *node = mte_to_node(mas->node);
2041 void __rcu **slots = ma_slots(node, mt);
2042 unsigned long *pivots = ma_pivots(node, mt);
2043 unsigned long *gaps = NULL;
2044 unsigned char end;
2045
2046 if (mab_end - mab_start > mt_pivots[mt])
2047 mab_end--;
2048
2049 if (!pivots[mt_pivots[mt] - 1])
2050 slots[mt_pivots[mt]] = NULL;
2051
2052 i = mab_start;
2053 do {
2054 pivots[j++] = b_node->pivot[i++];
2055 } while (i <= mab_end && likely(b_node->pivot[i]));
2056
2057 memcpy(slots, b_node->slot + mab_start,
2058 sizeof(void *) * (i - mab_start));
2059
2060 if (new_max)
2061 mas->max = b_node->pivot[i - 1];
2062
2063 end = j - 1;
2064 if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2065 unsigned long max_gap = 0;
2066 unsigned char offset = 0;
2067
2068 gaps = ma_gaps(node, mt);
2069 do {
2070 gaps[--j] = b_node->gap[--i];
2071 if (gaps[j] > max_gap) {
2072 offset = j;
2073 max_gap = gaps[j];
2074 }
2075 } while (j);
2076
2077 ma_set_meta(node, mt, offset, end);
2078 } else {
2079 mas_leaf_set_meta(mas, node, pivots, mt, end);
2080 }
2081 }
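
/*
 * Sketch (assumed values): copying a big node with gaps {0, 5, 0} into an
 * allocation tree node records offset 1, the largest gap, in the node
 * metadata, so later gap searches can find the best gap without rescanning
 * every slot.
 */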
2082
2083 /*
2084 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2085 * @mas: The maple state
2086 * @end: The maple node end
2087 * @mt: The maple node type
2088 */
2089 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2090 enum maple_type mt)
2091 {
2092 if (!(mas->mas_flags & MA_STATE_BULK))
2093 return;
2094
2095 if (mte_is_root(mas->node))
2096 return;
2097
2098 if (end > mt_min_slots[mt]) {
2099 mas->mas_flags &= ~MA_STATE_REBALANCE;
2100 return;
2101 }
2102 }
2103
2104 /*
2105 * mas_store_b_node() - Store an @entry into the b_node while also copying the
2106 * data from a maple encoded node.
2107 * @wr_mas: the maple write state
2108 * @b_node: the maple_big_node to fill with data
2109 * @offset_end: the offset to end copying
2110 *
2111 * Return: The actual end of the data stored in @b_node
2112 */
2113 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2114 struct maple_big_node *b_node, unsigned char offset_end)
2115 {
2116 unsigned char slot;
2117 unsigned char b_end;
2118 /* Possible underflow of piv will wrap back to 0 before use. */
2119 unsigned long piv;
2120 struct ma_state *mas = wr_mas->mas;
2121
2122 b_node->type = wr_mas->type;
2123 b_end = 0;
2124 slot = mas->offset;
2125 if (slot) {
2126 /* Copy start data up to insert. */
2127 mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2128 b_end = b_node->b_end;
2129 piv = b_node->pivot[b_end - 1];
2130 } else
2131 piv = mas->min - 1;
2132
2133 if (piv + 1 < mas->index) {
2134 /* Handle range starting after old range */
2135 b_node->slot[b_end] = wr_mas->content;
2136 if (!wr_mas->content)
2137 b_node->gap[b_end] = mas->index - 1 - piv;
2138 b_node->pivot[b_end++] = mas->index - 1;
2139 }
2140
2141 /* Store the new entry. */
2142 mas->offset = b_end;
2143 b_node->slot[b_end] = wr_mas->entry;
2144 b_node->pivot[b_end] = mas->last;
2145
2146 /* Appended. */
2147 if (mas->last >= mas->max)
2148 goto b_end;
2149
2150 /* Handle new range ending before old range ends */
2151 piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2152 if (piv > mas->last) {
2153 if (piv == ULONG_MAX)
2154 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2155
2156 if (offset_end != slot)
2157 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2158 offset_end);
2159
2160 b_node->slot[++b_end] = wr_mas->content;
2161 if (!wr_mas->content)
2162 b_node->gap[b_end] = piv - mas->last + 1;
2163 b_node->pivot[b_end] = piv;
2164 }
2165
2166 slot = offset_end + 1;
2167 if (slot > wr_mas->node_end)
2168 goto b_end;
2169
2170 /* Copy end data to the end of the node. */
2171 mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
2172 b_node->b_end--;
2173 return;
2174
2175 b_end:
2176 b_node->b_end = b_end;
2177 }
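
/*
 * Worked example (assumed values): a leaf holds one entry over [10, 20] and
 * a store of @entry arrives for [12, 15].  The big node is built as three
 * entries: the old content ending at pivot 11, the new entry ending at
 * pivot 15, and the old content again ending at pivot 20.  One range in the
 * node becomes three in @b_node.
 */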
2178
2179 /*
2180 * mas_prev_sibling() - Find the previous node with the same parent.
2181 * @mas: the maple state
2182 *
2183 * Return: True if there is a previous sibling, false otherwise.
2184 */
2185 static inline bool mas_prev_sibling(struct ma_state *mas)
2186 {
2187 unsigned int p_slot = mte_parent_slot(mas->node);
2188
2189 if (mte_is_root(mas->node))
2190 return false;
2191
2192 if (!p_slot)
2193 return false;
2194
2195 mas_ascend(mas);
2196 mas->offset = p_slot - 1;
2197 mas_descend(mas);
2198 return true;
2199 }
2200
2201 /*
2202 * mas_next_sibling() - Find the next node with the same parent.
2203 * @mas: the maple state
2204 *
2205 * Return: true if there is a next sibling, false otherwise.
2206 */
2207 static inline bool mas_next_sibling(struct ma_state *mas)
2208 {
2209 MA_STATE(parent, mas->tree, mas->index, mas->last);
2210
2211 if (mte_is_root(mas->node))
2212 return false;
2213
2214 parent = *mas;
2215 mas_ascend(&parent);
2216 parent.offset = mte_parent_slot(mas->node) + 1;
2217 if (parent.offset > mas_data_end(&parent))
2218 return false;
2219
2220 *mas = parent;
2221 mas_descend(mas);
2222 return true;
2223 }
2224
2225 /*
2226 * mte_node_or_none() - Return the encoded node or MAS_NONE.
2227 * @enode: The encoded maple node.
2228 *
2229 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
2230 *
2231 * Return: @enode or MAS_NONE
2232 */
2233 static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
2234 {
2235 if (enode)
2236 return enode;
2237
2238 return ma_enode_ptr(MAS_NONE);
2239 }
2240
2241 /*
2242 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2243 * @wr_mas: The maple write state
2244 *
2245 * Uses mas_slot_locked() and does not need to worry about dead nodes.
2246 */
2247 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2248 {
2249 struct ma_state *mas = wr_mas->mas;
2250 unsigned char count, offset;
2251
2252 if (unlikely(ma_is_dense(wr_mas->type))) {
2253 wr_mas->r_max = wr_mas->r_min = mas->index;
2254 mas->offset = mas->index = mas->min;
2255 return;
2256 }
2257
2258 wr_mas->node = mas_mn(wr_mas->mas);
2259 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2260 count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2261 wr_mas->pivots, mas->max);
2262 offset = mas->offset;
2263
2264 while (offset < count && mas->index > wr_mas->pivots[offset])
2265 offset++;
2266
2267 wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
2268 wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
2269 wr_mas->offset_end = mas->offset = offset;
2270 }
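
/*
 * For illustration (assumed layout): with pivots {10, 20, 30} and
 * mas->index = 25, the scan stops at offset 2, so the write lands in the
 * range described by r_min = 21 and r_max = 30.
 */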
2271
2272 /*
2273 * mast_rebalance_next() - Rebalance against the next node
2274 * @mast: The maple subtree state
2276 */
2277 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2278 {
2279 unsigned char b_end = mast->bn->b_end;
2280
2281 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2282 mast->bn, b_end);
2283 mast->orig_r->last = mast->orig_r->max;
2284 }
2285
2286 /*
2287 * mast_rebalance_prev() - Rebalance against the previous node
2288 * @mast: The maple subtree state
2290 */
2291 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2292 {
2293 unsigned char end = mas_data_end(mast->orig_l) + 1;
2294 unsigned char b_end = mast->bn->b_end;
2295
2296 mab_shift_right(mast->bn, end);
2297 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2298 mast->l->min = mast->orig_l->min;
2299 mast->orig_l->index = mast->orig_l->min;
2300 mast->bn->b_end = end + b_end;
2301 mast->l->offset += end;
2302 }
2303
2304 /*
2305 * mast_spanning_rebalance() - Rebalance nodes against the nearest neighbour,
2306 * favouring the node to the right. Check the nodes to the right, then to the
2307 * left, at each level upwards until the root is reached.
2308 * Data is copied into the @mast->bn.
2309 * @mast: The maple_subtree_state.
2310 */
2311 static inline
2312 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2313 {
2314 struct ma_state r_tmp = *mast->orig_r;
2315 struct ma_state l_tmp = *mast->orig_l;
2316 unsigned char depth = 0;
2317 
2320 do {
2321 mas_ascend(mast->orig_r);
2322 mas_ascend(mast->orig_l);
2323 depth++;
2324 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2325 mast->orig_r->offset++;
2326 do {
2327 mas_descend(mast->orig_r);
2328 mast->orig_r->offset = 0;
2329 } while (--depth);
2330
2331 mast_rebalance_next(mast);
2332 *mast->orig_l = l_tmp;
2333 return true;
2334 } else if (mast->orig_l->offset != 0) {
2335 mast->orig_l->offset--;
2336 do {
2337 mas_descend(mast->orig_l);
2338 mast->orig_l->offset =
2339 mas_data_end(mast->orig_l);
2340 } while (--depth);
2341
2342 mast_rebalance_prev(mast);
2343 *mast->orig_r = r_tmp;
2344 return true;
2345 }
2346 } while (!mte_is_root(mast->orig_r->node));
2347
2348 *mast->orig_r = r_tmp;
2349 *mast->orig_l = l_tmp;
2350 return false;
2351 }
2352
2353 /*
2354 * mast_ascend() - Ascend the original left and right maple states.
2355 * @mast: the maple subtree state.
2356 *
2357 * Ascend the original left and right sides. Set the offsets to point to the
2358 * data already in the new tree (@mast->l and @mast->r).
2359 */
2360 static inline void mast_ascend(struct maple_subtree_state *mast)
2361 {
2362 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2363 mas_ascend(mast->orig_l);
2364 mas_ascend(mast->orig_r);
2365
2366 mast->orig_r->offset = 0;
2367 mast->orig_r->index = mast->r->max;
2368 /* last should be larger than or equal to index */
2369 if (mast->orig_r->last < mast->orig_r->index)
2370 mast->orig_r->last = mast->orig_r->index;
2371
2372 wr_mas.type = mte_node_type(mast->orig_r->node);
2373 mas_wr_node_walk(&wr_mas);
2374 /* Set up the left side of things */
2375 mast->orig_l->offset = 0;
2376 mast->orig_l->index = mast->l->min;
2377 wr_mas.mas = mast->orig_l;
2378 wr_mas.type = mte_node_type(mast->orig_l->node);
2379 mas_wr_node_walk(&wr_mas);
2380
2381 mast->bn->type = wr_mas.type;
2382 }
2383
2384 /*
2385 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2386 * @mas: the maple state with the allocations.
2387 * @b_node: the maple_big_node with the type encoding.
2388 *
2389 * Use the node type from the maple_big_node to allocate a new node from the
2390 * ma_state. This function exists mainly for code readability.
2391 *
2392 * Return: A new maple encoded node
2393 */
2394 static inline struct maple_enode
2395 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2396 {
2397 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2398 }
2399
2400 /*
2401 * mas_mab_to_node() - Set up the left, right, and middle nodes
2402 *
2403 * @mas: the maple state that contains the allocations.
2404 * @b_node: the node which contains the data.
2405 * @left: The pointer which will have the left node
2406 * @right: The pointer which may have the right node
2407 * @middle: the pointer which may have the middle node (rare)
2408 * @mid_split: the split location for the middle node
2409 *
2410 * Return: the split of left.
2411 */
2412 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2413 struct maple_big_node *b_node, struct maple_enode **left,
2414 struct maple_enode **right, struct maple_enode **middle,
2415 unsigned char *mid_split, unsigned long min)
2416 {
2417 unsigned char split = 0;
2418 unsigned char slot_count = mt_slots[b_node->type];
2419
2420 *left = mas_new_ma_node(mas, b_node);
2421 *right = NULL;
2422 *middle = NULL;
2423 *mid_split = 0;
2424
2425 if (b_node->b_end < slot_count) {
2426 split = b_node->b_end;
2427 } else {
2428 split = mab_calc_split(mas, b_node, mid_split, min);
2429 *right = mas_new_ma_node(mas, b_node);
2430 }
2431
2432 if (*mid_split)
2433 *middle = mas_new_ma_node(mas, b_node);
2434
2435 return split;
2437 }
2438
2439 /*
2440 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2441 * pointer.
2442 * @b_node - the big node to add the entry
2443 * @mas - the maple state to get the pivot (mas->max)
2444 * @entry - the entry to add, if NULL nothing happens.
2445 */
2446 static inline void mab_set_b_end(struct maple_big_node *b_node,
2447 struct ma_state *mas,
2448 void *entry)
2449 {
2450 if (!entry)
2451 return;
2452
2453 b_node->slot[b_node->b_end] = entry;
2454 if (mt_is_alloc(mas->tree))
2455 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2456 b_node->pivot[b_node->b_end++] = mas->max;
2457 }
2458
2459 /*
2460 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2461 * of @mas->node to either @left or @right, depending on @slot and @split
2462 *
2463 * @mas - the maple state with the node that needs a parent
2464 * @left - possible parent 1
2465 * @right - possible parent 2
2466 * @slot - the slot the mas->node was placed
2467 * @split - the split location between @left and @right
2468 */
2469 static inline void mas_set_split_parent(struct ma_state *mas,
2470 struct maple_enode *left,
2471 struct maple_enode *right,
2472 unsigned char *slot, unsigned char split)
2473 {
2474 if (mas_is_none(mas))
2475 return;
2476
2477 if ((*slot) <= split)
2478 mas_set_parent(mas, mas->node, left, *slot);
2479 else if (right)
2480 mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
2481
2482 (*slot)++;
2483 }
2484
2485 /*
2486 * mte_mid_split_check() - Check if the next node passes the mid-split
2487 * @l: Pointer to the left encoded maple node.
2488 * @r: Pointer to the right encoded maple node.
2489 * @right: The encoded maple node to the right of the split.
2490 * @slot: The offset
2491 * @split: The split location.
2492 * @mid_split: The middle split.
2493 */
2494 static inline void mte_mid_split_check(struct maple_enode **l,
2495 struct maple_enode **r,
2496 struct maple_enode *right,
2497 unsigned char slot,
2498 unsigned char *split,
2499 unsigned char mid_split)
2500 {
2501 if (*r == right)
2502 return;
2503
2504 if (slot < mid_split)
2505 return;
2506
2507 *l = *r;
2508 *r = right;
2509 *split = mid_split;
2510 }
2511
2512 /*
2513 * mast_set_split_parents() - Helper function to set the three nodes' parents
2514 * (@left, @middle if set, and @right). Slot is taken from @mast->l.
2515 * @mast: the maple subtree state
2516 * @left: the left node
2517 * @right: the right node
2518 * @split: the split location.
2519 */
2520 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2521 struct maple_enode *left,
2522 struct maple_enode *middle,
2523 struct maple_enode *right,
2524 unsigned char split,
2525 unsigned char mid_split)
2526 {
2527 unsigned char slot;
2528 struct maple_enode *l = left;
2529 struct maple_enode *r = right;
2530
2531 if (mas_is_none(mast->l))
2532 return;
2533
2534 if (middle)
2535 r = middle;
2536
2537 slot = mast->l->offset;
2538
2539 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2540 mas_set_split_parent(mast->l, l, r, &slot, split);
2541
2542 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2543 mas_set_split_parent(mast->m, l, r, &slot, split);
2544
2545 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2546 mas_set_split_parent(mast->r, l, r, &slot, split);
2547 }
2548
2549 /*
2550 * mas_topiary_node() - Dispose of a single node
2551 * @mas: The maple state for pushing nodes
2552 * @enode: The encoded maple node
2553 * @in_rcu: If the tree is in rcu mode
2554 *
2555 * The node will either be RCU freed or pushed back on the maple state.
2556 */
2557 static inline void mas_topiary_node(struct ma_state *mas,
2558 struct maple_enode *enode, bool in_rcu)
2559 {
2560 struct maple_node *tmp;
2561
2562 if (enode == MAS_NONE)
2563 return;
2564
2565 tmp = mte_to_node(enode);
2566 mte_set_node_dead(enode);
2567 if (in_rcu)
2568 ma_free_rcu(tmp);
2569 else
2570 mas_push_node(mas, tmp);
2571 }
2572
2573 /*
2574 * mas_topiary_replace() - Replace the data with new data, then repair the
2575 * parent links within the new tree. Iterate over the dead sub-tree, collect
2576 * the dead subtrees, and dispose of the nodes that are no longer of use.
2577 *
2578 * The new tree will have up to three children with the correct parent. Keep
2579 * track of the new entries as they need to be followed to find the next level
2580 * of new entries.
2581 *
2582 * The old tree will have up to three children with the old parent. Keep track
2583 * of the old entries as they may have more nodes below replaced. Nodes within
2584 * [index, last] are dead subtrees, others need to be freed and followed.
2585 *
2586 * @mas: The maple state pointing at the new data
2587 * @old_enode: The maple encoded node being replaced
2588 *
2589 */
2590 static inline void mas_topiary_replace(struct ma_state *mas,
2591 struct maple_enode *old_enode)
2592 {
2593 struct ma_state tmp[3], tmp_next[3];
2594 MA_TOPIARY(subtrees, mas->tree);
2595 bool in_rcu;
2596 int i, n;
2597
2598 /* Place data in tree & then mark node as old */
2599 mas_put_in_tree(mas, old_enode);
2600
2601 /* Update the parent pointers in the tree */
2602 tmp[0] = *mas;
2603 tmp[0].offset = 0;
2604 tmp[1].node = MAS_NONE;
2605 tmp[2].node = MAS_NONE;
2606 while (!mte_is_leaf(tmp[0].node)) {
2607 n = 0;
2608 for (i = 0; i < 3; i++) {
2609 if (mas_is_none(&tmp[i]))
2610 continue;
2611
2612 while (n < 3) {
2613 if (!mas_find_child(&tmp[i], &tmp_next[n]))
2614 break;
2615 n++;
2616 }
2617
2618 mas_adopt_children(&tmp[i], tmp[i].node);
2619 }
2620
2621 if (MAS_WARN_ON(mas, n == 0))
2622 break;
2623
2624 while (n < 3)
2625 tmp_next[n++].node = MAS_NONE;
2626
2627 for (i = 0; i < 3; i++)
2628 tmp[i] = tmp_next[i];
2629 }
2630
2631 /* Collect the old nodes that need to be discarded */
2632 if (mte_is_leaf(old_enode))
2633 return mas_free(mas, old_enode);
2634
2635 tmp[0] = *mas;
2636 tmp[0].offset = 0;
2637 tmp[0].node = old_enode;
2638 tmp[1].node = MAS_NONE;
2639 tmp[2].node = MAS_NONE;
2640 in_rcu = mt_in_rcu(mas->tree);
2641 do {
2642 n = 0;
2643 for (i = 0; i < 3; i++) {
2644 if (mas_is_none(&tmp[i]))
2645 continue;
2646
2647 while (n < 3) {
2648 if (!mas_find_child(&tmp[i], &tmp_next[n]))
2649 break;
2650
2651 if ((tmp_next[n].min >= tmp_next->index) &&
2652 (tmp_next[n].max <= tmp_next->last)) {
2653 mat_add(&subtrees, tmp_next[n].node);
2654 tmp_next[n].node = MAS_NONE;
2655 } else {
2656 n++;
2657 }
2658 }
2659 }
2660
2661 if (MAS_WARN_ON(mas, n == 0))
2662 break;
2663
2664 while (n < 3)
2665 tmp_next[n++].node = MAS_NONE;
2666
2667 for (i = 0; i < 3; i++) {
2668 mas_topiary_node(mas, tmp[i].node, in_rcu);
2669 tmp[i] = tmp_next[i];
2670 }
2671 } while (!mte_is_leaf(tmp[0].node));
2672
2673 for (i = 0; i < 3; i++)
2674 mas_topiary_node(mas, tmp[i].node, in_rcu);
2675
2676 mas_mat_destroy(mas, &subtrees);
2677 }
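
/*
 * Note on the walk above: a spanning store replaces at most three subtrees
 * (left, middle, right) at any one level, so tracking three maple states
 * per level (tmp[] for the current level, tmp_next[] for the children) is
 * sufficient and keeps the repair a bounded descent rather than a full
 * tree walk.
 */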
2678
2679 /*
2680 * mas_wmb_replace() - Write memory barrier and replace
2681 * @mas: The maple state
2682 * @old_enode: The old maple encoded node that is being replaced.
2683 *
2684 * Updates gap as necessary.
2685 */
2686 static inline void mas_wmb_replace(struct ma_state *mas,
2687 struct maple_enode *old_enode)
2688 {
2689 /* Insert the new data in the tree */
2690 mas_topiary_replace(mas, old_enode);
2691
2692 if (mte_is_leaf(mas->node))
2693 return;
2694
2695 mas_update_gap(mas);
2696 }
2697
2698 /*
2699 * mast_cp_to_nodes() - Copy data out to nodes.
2700 * @mast: The maple subtree state
2701 * @left: The left encoded maple node
2702 * @middle: The middle encoded maple node
2703 * @right: The right encoded maple node
2704 * @split: The location to split between left and (middle ? middle : right)
2705 * @mid_split: The location to split between middle and right.
2706 */
2707 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2708 struct maple_enode *left, struct maple_enode *middle,
2709 struct maple_enode *right, unsigned char split, unsigned char mid_split)
2710 {
2711 bool new_lmax = true;
2712
2713 mast->l->node = mte_node_or_none(left);
2714 mast->m->node = mte_node_or_none(middle);
2715 mast->r->node = mte_node_or_none(right);
2716
2717 mast->l->min = mast->orig_l->min;
2718 if (split == mast->bn->b_end) {
2719 mast->l->max = mast->orig_r->max;
2720 new_lmax = false;
2721 }
2722
2723 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2724
2725 if (middle) {
2726 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2727 mast->m->min = mast->bn->pivot[split] + 1;
2728 split = mid_split;
2729 }
2730
2731 mast->r->max = mast->orig_r->max;
2732 if (right) {
2733 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2734 mast->r->min = mast->bn->pivot[split] + 1;
2735 }
2736 }
2737
2738 /*
2739 * mast_combine_cp_left() - Copy in the original left side of the tree into the
2740 * combined data set in the maple subtree state big node.
2741 * @mast: The maple subtree state
2742 */
2743 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2744 {
2745 unsigned char l_slot = mast->orig_l->offset;
2746
2747 if (!l_slot)
2748 return;
2749
2750 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2751 }
2752
2753 /*
2754 * mast_combine_cp_right() - Copy in the original right side of the tree into the
2755 * combined data set in the maple subtree state big node.
2756 * @mast: The maple subtree state
2757 */
2758 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2759 {
2760 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2761 return;
2762
2763 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2764 mt_slot_count(mast->orig_r->node), mast->bn,
2765 mast->bn->b_end);
2766 mast->orig_r->last = mast->orig_r->max;
2767 }
2768
2769 /*
2770 * mast_sufficient() - Check if the maple subtree state has enough data in the big
2771 * node to create at least one sufficient node
2772 * @mast: the maple subtree state
2773 */
2774 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2775 {
2776 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2777 return true;
2778
2779 return false;
2780 }
2781
2782 /*
2783 * mast_overflow() - Check if there is too much data in the subtree state for a
2784 * single node.
2785 * @mast: The maple subtree state
2786 */
2787 static inline bool mast_overflow(struct maple_subtree_state *mast)
2788 {
2789 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2790 return true;
2791
2792 return false;
2793 }
2794
2795 static inline void *mtree_range_walk(struct ma_state *mas)
2796 {
2797 unsigned long *pivots;
2798 unsigned char offset;
2799 struct maple_node *node;
2800 struct maple_enode *next, *last;
2801 enum maple_type type;
2802 void __rcu **slots;
2803 unsigned char end;
2804 unsigned long max, min;
2805 unsigned long prev_max, prev_min;
2806
2807 next = mas->node;
2808 min = mas->min;
2809 max = mas->max;
2810 do {
2811 offset = 0;
2812 last = next;
2813 node = mte_to_node(next);
2814 type = mte_node_type(next);
2815 pivots = ma_pivots(node, type);
2816 end = ma_data_end(node, type, pivots, max);
2817 if (unlikely(ma_dead_node(node)))
2818 goto dead_node;
2819
2820 if (pivots[offset] >= mas->index) {
2821 prev_max = max;
2822 prev_min = min;
2823 max = pivots[offset];
2824 goto next;
2825 }
2826
2827 do {
2828 offset++;
2829 } while ((offset < end) && (pivots[offset] < mas->index));
2830
2831 prev_min = min;
2832 min = pivots[offset - 1] + 1;
2833 prev_max = max;
2834 if (likely(offset < end && pivots[offset]))
2835 max = pivots[offset];
2836
2837 next:
2838 slots = ma_slots(node, type);
2839 next = mt_slot(mas->tree, slots, offset);
2840 if (unlikely(ma_dead_node(node)))
2841 goto dead_node;
2842 } while (!ma_is_leaf(type));
2843
2844 mas->offset = offset;
2845 mas->index = min;
2846 mas->last = max;
2847 mas->min = prev_min;
2848 mas->max = prev_max;
2849 mas->node = last;
2850 return (void *)next;
2851
2852 dead_node:
2853 mas_reset(mas);
2854 return NULL;
2855 }
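
/*
 * The walk resolves an index to the range containing it.  At the public API
 * level the effect looks like this (sketch; an RCU read lock around the
 * walk and a populated tree are assumed):
 *
 *	MA_STATE(mas, &tree, 15, 15);
 *	entry = mas_walk(&mas);
 *	// with [10, 20] stored, entry is that value and the state now
 *	// reports mas.index == 10 and mas.last == 20.
 */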
2856
2857 /*
2858 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2859 * @mas: The starting maple state
2860 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2861 * @count: The estimated count of iterations needed.
2862 *
2863 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2864 * is hit. First @b_node is split into two entries which are inserted into the
2865 * next iteration of the loop. @b_node is returned populated with the final
2866 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
2867 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
2868 * to account for what has been copied into the new sub-tree. The update of
2869 * orig_l_mas->last is used in mas_consume to find the slots that will need to
2870 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
2871 * the new sub-tree in case the sub-tree becomes the full tree.
2872 *
2873 * Return: the number of elements in b_node during the last loop.
2874 */
2875 static int mas_spanning_rebalance(struct ma_state *mas,
2876 struct maple_subtree_state *mast, unsigned char count)
2877 {
2878 unsigned char split, mid_split;
2879 unsigned char slot = 0;
2880 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2881 struct maple_enode *old_enode;
2882
2883 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2884 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2885 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2886
2887 /*
2888 * The tree needs to be rebalanced and leaves need to be kept at the same level.
2889 * Rebalancing is done by use of the ``struct ma_topiary``.
2890 */
2891 mast->l = &l_mas;
2892 mast->m = &m_mas;
2893 mast->r = &r_mas;
2894 l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
2895
2896 /* If this is not the root and the data is insufficient, rebalance. */
2897 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
2898 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
2899 mast_spanning_rebalance(mast);
2900
2901 l_mas.depth = 0;
2902
2903 /*
2904 * Each level of the tree is examined and balanced, pushing data to the left or
2905 * right, or rebalancing against left or right nodes is employed to avoid
2906 * rippling up the tree to limit the amount of churn. Once a new sub-section of
2907 * the tree is created, there may be a mix of new and old nodes. The old nodes
2908 * will have the incorrect parent pointers and currently be in two trees: the
2909 * original tree and the partially new tree. To remedy the parent pointers in
2910 * the old tree, the new data is swapped into the active tree and a walk down
2911 * the tree is performed and the parent pointers are updated.
2912 * See mas_topiary_replace() for more information.
2913 */
2914 while (count--) {
2915 mast->bn->b_end--;
2916 mast->bn->type = mte_node_type(mast->orig_l->node);
2917 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
2918 &mid_split, mast->orig_l->min);
2919 mast_set_split_parents(mast, left, middle, right, split,
2920 mid_split);
2921 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
2922
2923 /*
2924 * Copy data from the next level in the tree into mast->bn for the
2925 * next iteration.
2926 */
2927 memset(mast->bn, 0, sizeof(struct maple_big_node));
2928 mast->bn->type = mte_node_type(left);
2929 l_mas.depth++;
2930
2931 /* Root already stored in l->node. */
2932 if (mas_is_root_limits(mast->l))
2933 goto new_root;
2934
2935 mast_ascend(mast);
2936 mast_combine_cp_left(mast);
2937 l_mas.offset = mast->bn->b_end;
2938 mab_set_b_end(mast->bn, &l_mas, left);
2939 mab_set_b_end(mast->bn, &m_mas, middle);
2940 mab_set_b_end(mast->bn, &r_mas, right);
2941
2942 /* Copy anything necessary out of the right node. */
2943 mast_combine_cp_right(mast);
2944 mast->orig_l->last = mast->orig_l->max;
2945
2946 if (mast_sufficient(mast))
2947 continue;
2948
2949 if (mast_overflow(mast))
2950 continue;
2951
2952 /* May be a new root stored in mast->bn */
2953 if (mas_is_root_limits(mast->orig_l))
2954 break;
2955
2956 mast_spanning_rebalance(mast);
2957
2958 /* rebalancing from other nodes may require another loop. */
2959 if (!count)
2960 count++;
2961 }
2962
2963 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
2964 mte_node_type(mast->orig_l->node));
2965 l_mas.depth++;
2966 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
2967 mas_set_parent(mas, left, l_mas.node, slot);
2968 if (middle)
2969 mas_set_parent(mas, middle, l_mas.node, ++slot);
2970
2971 if (right)
2972 mas_set_parent(mas, right, l_mas.node, ++slot);
2973
2974 if (mas_is_root_limits(mast->l)) {
2975 new_root:
2976 mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
2977 while (!mte_is_root(mast->orig_l->node))
2978 mast_ascend(mast);
2979 } else {
2980 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
2981 }
2982
2983 old_enode = mast->orig_l->node;
2984 mas->depth = l_mas.depth;
2985 mas->node = l_mas.node;
2986 mas->min = l_mas.min;
2987 mas->max = l_mas.max;
2988 mas->offset = l_mas.offset;
2989 mas_wmb_replace(mas, old_enode);
2990 mtree_range_walk(mas);
2991 return mast->bn->b_end;
2992 }
2993
2994 /*
2995 * mas_rebalance() - Rebalance a given node.
2996 * @mas: The maple state
2997 * @b_node: The big maple node.
2998 *
2999 * Rebalance two nodes into a single node or two new nodes that are sufficient.
3000 * Continue upwards until tree is sufficient.
3001 *
3002 * Return: the number of elements in b_node during the last loop.
3003 */
3004 static inline int mas_rebalance(struct ma_state *mas,
3005 struct maple_big_node *b_node)
3006 {
3007 char empty_count = mas_mt_height(mas);
3008 struct maple_subtree_state mast;
3009 unsigned char shift, b_end = ++b_node->b_end;
3010
3011 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3012 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3013
3014 trace_ma_op(__func__, mas);
3015
3016 /*
3017 * Rebalancing occurs if a node is insufficient. Data is rebalanced
3018 * against the node to the right if it exists, otherwise the node to the
3019 * left of this node is rebalanced against this node. If rebalancing
3020 * causes just one node to be produced instead of two, then the parent
3021 * is also examined and rebalanced if it is insufficient. Every level
3022 * tries to combine the data in the same way. If one node contains the
3023 * entire range of the tree, then that node is used as a new root node.
3024 */
3025 mas_node_count(mas, empty_count * 2 - 1);
3026 if (mas_is_err(mas))
3027 return 0;
3028
3029 mast.orig_l = &l_mas;
3030 mast.orig_r = &r_mas;
3031 mast.bn = b_node;
3032 mast.bn->type = mte_node_type(mas->node);
3033
3034 l_mas = r_mas = *mas;
3035
3036 if (mas_next_sibling(&r_mas)) {
3037 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3038 r_mas.last = r_mas.index = r_mas.max;
3039 } else {
3040 mas_prev_sibling(&l_mas);
3041 shift = mas_data_end(&l_mas) + 1;
3042 mab_shift_right(b_node, shift);
3043 mas->offset += shift;
3044 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3045 b_node->b_end = shift + b_end;
3046 l_mas.index = l_mas.last = l_mas.min;
3047 }
3048
3049 return mas_spanning_rebalance(mas, &mast, empty_count);
3050 }
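
/*
 * Sketch of the trigger: repeated erases can leave a node with fewer than
 * mt_min_slots[type] entries.  The next write that lands in it funnels
 * through here, the node's data plus a sibling's data are combined in
 * @b_node and re-split, and if that leaves the parent insufficient the same
 * treatment repeats one level up.
 */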
3051
3052 /*
3053 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3054 * state.
3055 * @mas: The maple state
3056 * @end: The end of the left-most node.
3057 *
3058 * During a mass-insert event (such as forking), it may be necessary to
3059 * rebalance the left-most node when it is not sufficient.
3060 */
3061 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3062 {
3063 enum maple_type mt = mte_node_type(mas->node);
3064 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3065 struct maple_enode *eparent, *old_eparent;
3066 unsigned char offset, tmp, split = mt_slots[mt] / 2;
3067 void __rcu **l_slots, **slots;
3068 unsigned long *l_pivs, *pivs, gap;
3069 bool in_rcu = mt_in_rcu(mas->tree);
3070
3071 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3072
3073 l_mas = *mas;
3074 mas_prev_sibling(&l_mas);
3075
3076 /* set up node. */
3077 if (in_rcu) {
3078 /* Allocate for both left and right as well as parent. */
3079 mas_node_count(mas, 3);
3080 if (mas_is_err(mas))
3081 return;
3082
3083 newnode = mas_pop_node(mas);
3084 } else {
3085 newnode = &reuse;
3086 }
3087
3088 node = mas_mn(mas);
3089 newnode->parent = node->parent;
3090 slots = ma_slots(newnode, mt);
3091 pivs = ma_pivots(newnode, mt);
3092 left = mas_mn(&l_mas);
3093 l_slots = ma_slots(left, mt);
3094 l_pivs = ma_pivots(left, mt);
3095 if (!l_slots[split])
3096 split++;
3097 tmp = mas_data_end(&l_mas) - split;
3098
3099 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3100 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3101 pivs[tmp] = l_mas.max;
3102 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3103 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3104
3105 l_mas.max = l_pivs[split];
3106 mas->min = l_mas.max + 1;
3107 old_eparent = mt_mk_node(mte_parent(l_mas.node),
3108 mas_parent_type(&l_mas, l_mas.node));
3109 tmp += end;
3110 if (!in_rcu) {
3111 unsigned char max_p = mt_pivots[mt];
3112 unsigned char max_s = mt_slots[mt];
3113
3114 if (tmp < max_p)
3115 memset(pivs + tmp, 0,
3116 sizeof(unsigned long) * (max_p - tmp));
3117
3118 if (tmp < mt_slots[mt])
3119 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3120
3121 memcpy(node, newnode, sizeof(struct maple_node));
3122 ma_set_meta(node, mt, 0, tmp - 1);
3123 mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
3124 l_pivs[split]);
3125
3126 /* Remove data from l_pivs. */
3127 tmp = split + 1;
3128 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3129 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3130 ma_set_meta(left, mt, 0, split);
3131 eparent = old_eparent;
3132
3133 goto done;
3134 }
3135
3136 /* RCU requires replacing l_mas, mas, and the parent. */
3137 mas->node = mt_mk_node(newnode, mt);
3138 ma_set_meta(newnode, mt, 0, tmp);
3139
3140 new_left = mas_pop_node(mas);
3141 new_left->parent = left->parent;
3142 mt = mte_node_type(l_mas.node);
3143 slots = ma_slots(new_left, mt);
3144 pivs = ma_pivots(new_left, mt);
3145 memcpy(slots, l_slots, sizeof(void *) * split);
3146 memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3147 ma_set_meta(new_left, mt, 0, split);
3148 l_mas.node = mt_mk_node(new_left, mt);
3149
3150 /* replace parent. */
3151 offset = mte_parent_slot(mas->node);
3152 mt = mas_parent_type(&l_mas, l_mas.node);
3153 parent = mas_pop_node(mas);
3154 slots = ma_slots(parent, mt);
3155 pivs = ma_pivots(parent, mt);
3156 memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
3157 rcu_assign_pointer(slots[offset], mas->node);
3158 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3159 pivs[offset - 1] = l_mas.max;
3160 eparent = mt_mk_node(parent, mt);
3161 done:
3162 gap = mas_leaf_max_gap(mas);
3163 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3164 gap = mas_leaf_max_gap(&l_mas);
3165 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3166 mas_ascend(mas);
3167
3168 if (in_rcu) {
3169 mas_replace_node(mas, old_eparent);
3170 mas_adopt_children(mas, mas->node);
3171 }
3172
3173 mas_update_gap(mas);
3174 }
3175
3176 /*
3177 * mas_split_final_node() - Split the final node in a subtree operation.
3178 * @mast: the maple subtree state
3179 * @mas: The maple state
3180 * @height: The height of the tree in case it's a new root.
3181 */
3182 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3183 struct ma_state *mas, int height)
3184 {
3185 struct maple_enode *ancestor;
3186
3187 if (mte_is_root(mas->node)) {
3188 if (mt_is_alloc(mas->tree))
3189 mast->bn->type = maple_arange_64;
3190 else
3191 mast->bn->type = maple_range_64;
3192 mas->depth = height;
3193 }
3194 /*
3195 * Only a single node is used here, could be root.
3196 * The big_node data should just fit in a single node.
3197 */
3198 ancestor = mas_new_ma_node(mas, mast->bn);
3199 mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
3200 mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
3201 mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3202
3203 mast->l->node = ancestor;
3204 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3205 mas->offset = mast->bn->b_end - 1;
3206 return true;
3207 }
3208
3209 /*
3210 * mast_fill_bnode() - Copy data into the big node in the subtree state
3211 * @mast: The maple subtree state
3212 * @mas: the maple state
3213 * @skip: The number of entries to skip for new nodes insertion.
3214 */
3215 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3216 struct ma_state *mas,
3217 unsigned char skip)
3218 {
3219 bool cp = true;
3220 unsigned char split;
3221
3222 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3223 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3224 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3225 mast->bn->b_end = 0;
3226
3227 if (mte_is_root(mas->node)) {
3228 cp = false;
3229 } else {
3230 mas_ascend(mas);
3231 mas->offset = mte_parent_slot(mas->node);
3232 }
3233
3234 if (cp && mast->l->offset)
3235 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3236
3237 split = mast->bn->b_end;
3238 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3239 mast->r->offset = mast->bn->b_end;
3240 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3241 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3242 cp = false;
3243
3244 if (cp)
3245 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3246 mast->bn, mast->bn->b_end);
3247
3248 mast->bn->b_end--;
3249 mast->bn->type = mte_node_type(mas->node);
3250 }
3251
3252 /*
3253 * mast_split_data() - Split the data in the subtree state big node into regular
3254 * nodes.
3255 * @mast: The maple subtree state
3256 * @mas: The maple state
3257 * @split: The location to split the big node
3258 */
3259 static inline void mast_split_data(struct maple_subtree_state *mast,
3260 struct ma_state *mas, unsigned char split)
3261 {
3262 unsigned char p_slot;
3263
3264 mab_mas_cp(mast->bn, 0, split, mast->l, true);
3265 mte_set_pivot(mast->r->node, 0, mast->r->max);
3266 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3267 mast->l->offset = mte_parent_slot(mas->node);
3268 mast->l->max = mast->bn->pivot[split];
3269 mast->r->min = mast->l->max + 1;
3270 if (mte_is_leaf(mas->node))
3271 return;
3272
3273 p_slot = mast->orig_l->offset;
3274 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3275 &p_slot, split);
3276 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3277 &p_slot, split);
3278 }
3279
3280 /*
3281 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3282 * data to the right or left node if there is room.
3283 * @mas: The maple state
3284 * @height: The current height of the maple state
3285 * @mast: The maple subtree state
3286 * @left: Push left or not.
3287 *
3288 * Keeping the height of the tree low means faster lookups.
3289 *
3290 * Return: True if pushed, false otherwise.
3291 */
3292 static inline bool mas_push_data(struct ma_state *mas, int height,
3293 struct maple_subtree_state *mast, bool left)
3294 {
3295 unsigned char slot_total = mast->bn->b_end;
3296 unsigned char end, space, split;
3297
3298 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3299 tmp_mas = *mas;
3300 tmp_mas.depth = mast->l->depth;
3301
3302 if (left && !mas_prev_sibling(&tmp_mas))
3303 return false;
3304 else if (!left && !mas_next_sibling(&tmp_mas))
3305 return false;
3306
3307 end = mas_data_end(&tmp_mas);
3308 slot_total += end;
3309 space = 2 * mt_slot_count(mas->node) - 2;
3310 /* -2 instead of -1 to ensure there isn't a triple split */
3311 if (ma_is_leaf(mast->bn->type))
3312 space--;
3313
3314 if (mas->max == ULONG_MAX)
3315 space--;
3316
3317 if (slot_total >= space)
3318 return false;
3319
3320 /* Get the data; Fill mast->bn */
3321 mast->bn->b_end++;
3322 if (left) {
3323 mab_shift_right(mast->bn, end + 1);
3324 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3325 mast->bn->b_end = slot_total + 1;
3326 } else {
3327 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3328 }
3329
3330 /* Configure mast for splitting of mast->bn */
3331 split = mt_slots[mast->bn->type] - 2;
3332 if (left) {
3333 /* Switch mas to prev node */
3334 *mas = tmp_mas;
3335 /* Start using mast->l for the left side. */
3336 tmp_mas.node = mast->l->node;
3337 *mast->l = tmp_mas;
3338 } else {
3339 tmp_mas.node = mast->r->node;
3340 *mast->r = tmp_mas;
3341 split = slot_total - split;
3342 }
3343 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3344 /* Update parent slot for split calculation. */
3345 if (left)
3346 mast->orig_l->offset += end + 1;
3347
3348 mast_split_data(mast, mas, split);
3349 mast_fill_bnode(mast, mas, 2);
3350 mas_split_final_node(mast, mas, height + 1);
3351 return true;
3352 }
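
/*
 * Capacity sketch for the push decision (assumed numbers, leaf case with
 * max != ULONG_MAX): with 16-slot maple_range_64 nodes, space is
 * 2 * 16 - 2 - 1 = 29, so a combined total of 29 or more slots falls back
 * to a split, while anything smaller is redistributed across the two
 * siblings instead.
 */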
3353
3354 /*
3355 * mas_split() - Split data that is too big for one node into two.
3356 * @mas: The maple state
3357 * @b_node: The maple big node
3358 * Return: 1 on success, 0 on failure.
3359 */
3360 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3361 {
3362 struct maple_subtree_state mast;
3363 int height = 0;
3364 unsigned char mid_split, split = 0;
3365 struct maple_enode *old;
3366
3367 /*
3368 * Splitting is handled differently from any other B-tree; the Maple
3369 * Tree splits upwards. Splitting up means that the split operation
3370 * occurs when the walk of the tree hits the leaves and not on the way
3371 * down. The reason for splitting up is that it is impossible to know
3372 * how much space will be needed until the leaf is (or leaves are)
3373 * reached. Since overwriting data is allowed and a range could
3374 * overwrite more than one range or result in changing one entry into 3
3375 * entries, it is impossible to know if a split is required until the
3376 * data is examined.
3377 *
3378 * Splitting is a balancing act between keeping allocations to a minimum
3379 * and avoiding a 'jitter' event where a tree is expanded to make room
3380 * for an entry followed by a contraction when the entry is removed. To
3381 * accomplish the balance, there are empty slots remaining in both left
3382 * and right nodes after a split.
3383 */
3384 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3385 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3386 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3387 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3388
3389 trace_ma_op(__func__, mas);
3390 mas->depth = mas_mt_height(mas);
3391 /* Allocation failures will happen early. */
3392 mas_node_count(mas, 1 + mas->depth * 2);
3393 if (mas_is_err(mas))
3394 return 0;
3395
3396 mast.l = &l_mas;
3397 mast.r = &r_mas;
3398 mast.orig_l = &prev_l_mas;
3399 mast.orig_r = &prev_r_mas;
3400 mast.bn = b_node;
3401
3402 while (height++ <= mas->depth) {
3403 if (mt_slots[b_node->type] > b_node->b_end) {
3404 mas_split_final_node(&mast, mas, height);
3405 break;
3406 }
3407
3408 l_mas = r_mas = *mas;
3409 l_mas.node = mas_new_ma_node(mas, b_node);
3410 r_mas.node = mas_new_ma_node(mas, b_node);
3411 /*
3412 * Another way that 'jitter' is avoided is to terminate a split up early if the
3413 * left or right node has space to spare. This is referred to as "pushing left"
3414 * or "pushing right" and is similar to the B* tree, except the nodes left or
3415 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3416 * is a significant savings.
3417 */
3418 /* Try to push left. */
3419 if (mas_push_data(mas, height, &mast, true))
3420 break;
3421
3422 /* Try to push right. */
3423 if (mas_push_data(mas, height, &mast, false))
3424 break;
3425
3426 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3427 mast_split_data(&mast, mas, split);
3428 /*
3429 * r->max is usually correct, but mab_mas_cp() in the call above
3430 * may have overwritten it, so restore it from mas->max here.
3431 */
3432 mast.r->max = mas->max;
3433 mast_fill_bnode(&mast, mas, 1);
3434 prev_l_mas = *mast.l;
3435 prev_r_mas = *mast.r;
3436 }
3437
3438 /* Set the original node as dead */
3439 old = mas->node;
3440 mas->node = l_mas.node;
3441 mas_wmb_replace(mas, old);
3442 mtree_range_walk(mas);
3443 return 1;
3444 }
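
/*
 * Illustrative trigger (sketch; declarations and error handling omitted):
 * storing enough disjoint ranges overflows a leaf and exercises this path
 * on the way back up the tree:
 *
 *	DEFINE_MTREE(tree);
 *	for (i = 0; i < 20; i++)
 *		mtree_store_range(&tree, i * 10, i * 10 + 5,
 *				  xa_mk_value(i), GFP_KERNEL);
 */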
3445
3446 /*
3447 * mas_reuse_node() - Reuse the node to store the data.
3448 * @wr_mas: The maple write state
3449 * @bn: The maple big node
3450 * @end: The end of the data.
3451 *
3452 * Will always return false in RCU mode.
3453 *
3454 * Return: True if node was reused, false otherwise.
3455 */
3456 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3457 struct maple_big_node *bn, unsigned char end)
3458 {
3459 /* Need to be rcu safe. */
3460 if (mt_in_rcu(wr_mas->mas->tree))
3461 return false;
3462
3463 if (end > bn->b_end) {
3464 int clear = mt_slots[wr_mas->type] - bn->b_end;
3465
3466 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3467 memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
3468 }
3469 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3470 return true;
3471 }
3472
3473 /*
3474 * mas_commit_b_node() - Commit the big node into the tree.
3475 * @wr_mas: The maple write state
3476 * @b_node: The maple big node
3477 * @end: The end of the data.
3478 */
3479 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3480 struct maple_big_node *b_node, unsigned char end)
3481 {
3482 struct maple_node *node;
3483 struct maple_enode *old_enode;
3484 unsigned char b_end = b_node->b_end;
3485 enum maple_type b_type = b_node->type;
3486
3487 old_enode = wr_mas->mas->node;
3488 if ((b_end < mt_min_slots[b_type]) &&
3489 (!mte_is_root(old_enode)) &&
3490 (mas_mt_height(wr_mas->mas) > 1))
3491 return mas_rebalance(wr_mas->mas, b_node);
3492
3493 if (b_end >= mt_slots[b_type])
3494 return mas_split(wr_mas->mas, b_node);
3495
3496 if (mas_reuse_node(wr_mas, b_node, end))
3497 goto reuse_node;
3498
3499 mas_node_count(wr_mas->mas, 1);
3500 if (mas_is_err(wr_mas->mas))
3501 return 0;
3502
3503 node = mas_pop_node(wr_mas->mas);
3504 node->parent = mas_mn(wr_mas->mas)->parent;
3505 wr_mas->mas->node = mt_mk_node(node, b_type);
3506 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3507 mas_replace_node(wr_mas->mas, old_enode);
3508 reuse_node:
3509 mas_update_gap(wr_mas->mas);
3510 return 1;
3511 }
3512
3513 /*
3514 * mas_root_expand() - Expand a root to a node
3515 * @mas: The maple state
3516 * @entry: The entry to store into the tree
3517 */
3518 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3519 {
3520 void *contents = mas_root_locked(mas);
3521 enum maple_type type = maple_leaf_64;
3522 struct maple_node *node;
3523 void __rcu **slots;
3524 unsigned long *pivots;
3525 int slot = 0;
3526
3527 mas_node_count(mas, 1);
3528 if (unlikely(mas_is_err(mas)))
3529 return 0;
3530
3531 node = mas_pop_node(mas);
3532 pivots = ma_pivots(node, type);
3533 slots = ma_slots(node, type);
3534 node->parent = ma_parent_ptr(mas_tree_parent(mas));
3535 mas->node = mt_mk_node(node, type);
3536
3537 if (mas->index) {
3538 if (contents) {
3539 rcu_assign_pointer(slots[slot], contents);
3540 if (likely(mas->index > 1))
3541 slot++;
3542 }
3543 pivots[slot++] = mas->index - 1;
3544 }
3545
3546 rcu_assign_pointer(slots[slot], entry);
3547 mas->offset = slot;
3548 pivots[slot] = mas->last;
3549 if (mas->last != ULONG_MAX)
3550 pivots[++slot] = ULONG_MAX;
3551
3552 mas->depth = 1;
3553 mas_set_height(mas);
3554 ma_set_meta(node, maple_leaf_64, 0, slot);
3555 /* swap the new root into the tree */
3556 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3557 return slot;
3558 }
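
/*
 * Illustrative sketch (example values, not part of the kernel source):
 * storing an entry over [5, 10] into an empty tree via mas_root_expand()
 * produces a maple_leaf_64 root laid out as:
 *
 *    slots:  | NULL | entry | NULL      |
 *    pivots: | 4    | 10    | ULONG_MAX |
 *
 * The leading NULL with pivot 4 covers the gap [0, 4], and the trailing
 * pivot marks the remainder of the range as empty.
 */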
3559
3560 static inline void mas_store_root(struct ma_state *mas, void *entry)
3561 {
3562 if (likely((mas->last != 0) || (mas->index != 0)))
3563 mas_root_expand(mas, entry);
3564 else if (((unsigned long) (entry) & 3) == 2)
3565 mas_root_expand(mas, entry);
3566 else {
3567 rcu_assign_pointer(mas->tree->ma_root, entry);
3568 mas->node = MAS_START;
3569 }
3570 }
3571
3572 /*
3573 * mas_is_span_wr() - Check if the write needs to be treated as a write that
3574 * spans the node.
3575 * @wr_mas: The maple write state
3579 *
3580 * Spanning writes are writes that start in one node and end in another OR if
3581 * the write of a %NULL will cause the node to end with a %NULL.
3582 *
3583 * Return: True if this is a spanning write, false otherwise.
3584 */
3585 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3586 {
3587 unsigned long max = wr_mas->r_max;
3588 unsigned long last = wr_mas->mas->last;
3589 enum maple_type type = wr_mas->type;
3590 void *entry = wr_mas->entry;
3591
3592 /* Contained in this pivot, fast path */
3593 if (last < max)
3594 return false;
3595
3596 if (ma_is_leaf(type)) {
3597 max = wr_mas->mas->max;
3598 if (last < max)
3599 return false;
3600 }
3601
3602 if (last == max) {
3603 /*
3604 * The last entry of a leaf node cannot be NULL unless it is the
3605 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
3606 */
3607 if (entry || last == ULONG_MAX)
3608 return false;
3609 }
3610
3611 trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
3612 return true;
3613 }
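
/*
 * Two illustrative cases (example values, not part of the kernel source):
 *
 * 1. A leaf covers [0, 100] (mas->max == 100) and the write has
 *    mas->last == 150: the write ends in a later node, so it spans.
 * 2. In the same leaf, a NULL write ends exactly at 100 and the node is
 *    not the right-most node (100 != ULONG_MAX): the trailing NULL must
 *    merge with the following range, so it is also treated as spanning.
 */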
3614
3615 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3616 {
3617 wr_mas->type = mte_node_type(wr_mas->mas->node);
3618 mas_wr_node_walk(wr_mas);
3619 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3620 }
3621
3622 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3623 {
3624 wr_mas->mas->max = wr_mas->r_max;
3625 wr_mas->mas->min = wr_mas->r_min;
3626 wr_mas->mas->node = wr_mas->content;
3627 wr_mas->mas->offset = 0;
3628 wr_mas->mas->depth++;
3629 }
3630 /*
3631 * mas_wr_walk() - Walk the tree for a write.
3632 * @wr_mas: The maple write state
3633 *
3634 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3635 *
3636 * Return: True if it's contained in a node, false on spanning write.
3637 */
3638 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3639 {
3640 struct ma_state *mas = wr_mas->mas;
3641
3642 while (true) {
3643 mas_wr_walk_descend(wr_mas);
3644 if (unlikely(mas_is_span_wr(wr_mas)))
3645 return false;
3646
3647 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3648 mas->offset);
3649 if (ma_is_leaf(wr_mas->type))
3650 return true;
3651
3652 mas_wr_walk_traverse(wr_mas);
3653 }
3654
3655 return true;
3656 }
3657
3658 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3659 {
3660 struct ma_state *mas = wr_mas->mas;
3661
3662 while (true) {
3663 mas_wr_walk_descend(wr_mas);
3664 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3665 mas->offset);
3666 if (ma_is_leaf(wr_mas->type))
3667 return true;
3668 mas_wr_walk_traverse(wr_mas);
3669
3670 }
3671 return true;
3672 }
3673 /*
3674 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3675 * @l_wr_mas: The left maple write state
3676 * @r_wr_mas: The right maple write state
3677 */
3678 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3679 struct ma_wr_state *r_wr_mas)
3680 {
3681 struct ma_state *r_mas = r_wr_mas->mas;
3682 struct ma_state *l_mas = l_wr_mas->mas;
3683 unsigned char l_slot;
3684
3685 l_slot = l_mas->offset;
3686 if (!l_wr_mas->content)
3687 l_mas->index = l_wr_mas->r_min;
3688
3689 if ((l_mas->index == l_wr_mas->r_min) &&
3690 (l_slot &&
3691 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3692 if (l_slot > 1)
3693 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3694 else
3695 l_mas->index = l_mas->min;
3696
3697 l_mas->offset = l_slot - 1;
3698 }
3699
3700 if (!r_wr_mas->content) {
3701 if (r_mas->last < r_wr_mas->r_max)
3702 r_mas->last = r_wr_mas->r_max;
3703 r_mas->offset++;
3704 } else if ((r_mas->last == r_wr_mas->r_max) &&
3705 (r_mas->last < r_mas->max) &&
3706 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3707 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3708 r_wr_mas->type, r_mas->offset + 1);
3709 r_mas->offset++;
3710 }
3711 }
3712
3713 static inline void *mas_state_walk(struct ma_state *mas)
3714 {
3715 void *entry;
3716
3717 entry = mas_start(mas);
3718 if (mas_is_none(mas))
3719 return NULL;
3720
3721 if (mas_is_ptr(mas))
3722 return entry;
3723
3724 return mtree_range_walk(mas);
3725 }
3726
3727 /*
3728 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3729 * to date.
3730 *
3731 * @mas: The maple state.
3732 *
3733 * Note: Leaves @mas in an undesirable state.
3734 * Return: The entry for @mas->index or %NULL on dead node.
3735 */
3736 static inline void *mtree_lookup_walk(struct ma_state *mas)
3737 {
3738 unsigned long *pivots;
3739 unsigned char offset;
3740 struct maple_node *node;
3741 struct maple_enode *next;
3742 enum maple_type type;
3743 void __rcu **slots;
3744 unsigned char end;
3745 unsigned long max;
3746
3747 next = mas->node;
3748 max = ULONG_MAX;
3749 do {
3750 offset = 0;
3751 node = mte_to_node(next);
3752 type = mte_node_type(next);
3753 pivots = ma_pivots(node, type);
3754 end = ma_data_end(node, type, pivots, max);
3755 if (unlikely(ma_dead_node(node)))
3756 goto dead_node;
3757 do {
3758 if (pivots[offset] >= mas->index) {
3759 max = pivots[offset];
3760 break;
3761 }
3762 } while (++offset < end);
3763
3764 slots = ma_slots(node, type);
3765 next = mt_slot(mas->tree, slots, offset);
3766 if (unlikely(ma_dead_node(node)))
3767 goto dead_node;
3768 } while (!ma_is_leaf(type));
3769
3770 return (void *)next;
3771
3772 dead_node:
3773 mas_reset(mas);
3774 return NULL;
3775 }
3776
3777 static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
3778 /*
3779 * mas_new_root() - Create a new root node that only contains the entry passed
3780 * in.
3781 * @mas: The maple state
3782 * @entry: The entry to store.
3783 *
3784 * Only valid when mas->index is 0 and mas->last is ULONG_MAX.
3785 *
3786 * Return: 0 on error, 1 on success.
3787 */
3788 static inline int mas_new_root(struct ma_state *mas, void *entry)
3789 {
3790 struct maple_enode *root = mas_root_locked(mas);
3791 enum maple_type type = maple_leaf_64;
3792 struct maple_node *node;
3793 void __rcu **slots;
3794 unsigned long *pivots;
3795
3796 if (!entry && !mas->index && mas->last == ULONG_MAX) {
3797 mas->depth = 0;
3798 mas_set_height(mas);
3799 rcu_assign_pointer(mas->tree->ma_root, entry);
3800 mas->node = MAS_START;
3801 goto done;
3802 }
3803
3804 mas_node_count(mas, 1);
3805 if (mas_is_err(mas))
3806 return 0;
3807
3808 node = mas_pop_node(mas);
3809 pivots = ma_pivots(node, type);
3810 slots = ma_slots(node, type);
3811 node->parent = ma_parent_ptr(mas_tree_parent(mas));
3812 mas->node = mt_mk_node(node, type);
3813 rcu_assign_pointer(slots[0], entry);
3814 pivots[0] = mas->last;
3815 mas->depth = 1;
3816 mas_set_height(mas);
3817 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3818
3819 done:
3820 if (xa_is_node(root))
3821 mte_destroy_walk(root, mas->tree);
3822
3823 return 1;
3824 }
3825 /*
3826 * mas_wr_spanning_store() - Create a subtree with the store operation completed
3827 * and new nodes where necessary, then place the sub-tree in the actual tree.
3828 * Note that mas is expected to point to the node which caused the store to
3829 * span.
3830 * @wr_mas: The maple write state
3831 *
3832 * Return: 0 on error, positive on success.
3833 */
3834 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3835 {
3836 struct maple_subtree_state mast;
3837 struct maple_big_node b_node;
3838 struct ma_state *mas;
3839 unsigned char height;
3840
3841 /* Left and Right side of spanning store */
3842 MA_STATE(l_mas, NULL, 0, 0);
3843 MA_STATE(r_mas, NULL, 0, 0);
3844 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3845 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3846
3847 /*
3848 * A store operation that spans multiple nodes is called a spanning
3849 * store and is handled early in the store call stack by the function
3850 * mas_is_span_wr(). When a spanning store is identified, the maple
3851 * state is duplicated. The first maple state walks the left tree path
3852 * to ``index``, the duplicate walks the right tree path to ``last``.
3853 * The data in the two nodes are combined into a single node, two nodes,
3854 * or possibly three nodes (see the 3-way split above). A ``NULL``
3855 * written to the last entry of a node is considered a spanning store as
3856 * a rebalance is required for the operation to complete and an overflow
3857 * of data may happen.
3858 */
3859 mas = wr_mas->mas;
3860 trace_ma_op(__func__, mas);
3861
3862 if (unlikely(!mas->index && mas->last == ULONG_MAX))
3863 return mas_new_root(mas, wr_mas->entry);
3864 /*
3865 * Node rebalancing may occur due to this store, so there may be three new
3866 * nodes required per level, plus a new root.
3867 */
3868 height = mas_mt_height(mas);
3869 mas_node_count(mas, 1 + height * 3);
3870 if (mas_is_err(mas))
3871 return 0;
3872
3873 /*
3874 * Set up right side. Need to get to the next offset after the spanning
3875 * store to ensure it's not NULL and to combine both the next node and
3876 * the node with the start together.
3877 */
3878 r_mas = *mas;
3879 /* Avoid overflow, walk to next slot in the tree. */
3880 if (r_mas.last + 1)
3881 r_mas.last++;
3882
3883 r_mas.index = r_mas.last;
3884 mas_wr_walk_index(&r_wr_mas);
3885 r_mas.last = r_mas.index = mas->last;
3886
3887 /* Set up left side. */
3888 l_mas = *mas;
3889 mas_wr_walk_index(&l_wr_mas);
3890
3891 if (!wr_mas->entry) {
3892 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
3893 mas->offset = l_mas.offset;
3894 mas->index = l_mas.index;
3895 mas->last = l_mas.last = r_mas.last;
3896 }
3897
3898 /* expanding NULLs may make this cover the entire range */
3899 if (!l_mas.index && r_mas.last == ULONG_MAX) {
3900 mas_set_range(mas, 0, ULONG_MAX);
3901 return mas_new_root(mas, wr_mas->entry);
3902 }
3903
3904 memset(&b_node, 0, sizeof(struct maple_big_node));
3905 /* Copy l_mas and store the value in b_node. */
3906 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
3907 /* Copy r_mas into b_node. */
3908 if (r_mas.offset <= r_wr_mas.node_end)
3909 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
3910 &b_node, b_node.b_end + 1);
3911 else
3912 b_node.b_end++;
3913
3914 /* Stop spanning searches by searching for just index. */
3915 l_mas.index = l_mas.last = mas->index;
3916
3917 mast.bn = &b_node;
3918 mast.orig_l = &l_mas;
3919 mast.orig_r = &r_mas;
3920 /* Combine l_mas and r_mas and split them up evenly again. */
3921 return mas_spanning_rebalance(mas, &mast, height + 1);
3922 }
3923
3924 /*
3925 * mas_wr_node_store() - Attempt to store the value in a node
3926 * @wr_mas: The maple write state
3927 *
3928 * Attempts to reuse the node, but may allocate.
3929 *
3930 * Return: True if stored, false otherwise
3931 */
3932 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
3933 unsigned char new_end)
3934 {
3935 struct ma_state *mas = wr_mas->mas;
3936 void __rcu **dst_slots;
3937 unsigned long *dst_pivots;
3938 unsigned char dst_offset, offset_end = wr_mas->offset_end;
3939 struct maple_node reuse, *newnode;
3940 unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
3941 bool in_rcu = mt_in_rcu(mas->tree);
3942
3943 /* If too little data would remain, take the slow path so a rebalance can occur. */
3944 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
3945 !(mas->mas_flags & MA_STATE_BULK))
3946 return false;
3947
3948 if (mas->last == wr_mas->end_piv)
3949 offset_end++; /* don't copy this offset */
3950 else if (unlikely(wr_mas->r_max == ULONG_MAX))
3951 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
3952
3953 /* set up node. */
3954 if (in_rcu) {
3955 mas_node_count(mas, 1);
3956 if (mas_is_err(mas))
3957 return false;
3958
3959 newnode = mas_pop_node(mas);
3960 } else {
3961 memset(&reuse, 0, sizeof(struct maple_node));
3962 newnode = &reuse;
3963 }
3964
3965 newnode->parent = mas_mn(mas)->parent;
3966 dst_pivots = ma_pivots(newnode, wr_mas->type);
3967 dst_slots = ma_slots(newnode, wr_mas->type);
3968 /* Copy from start to insert point */
3969 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
3970 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
3971
3972 /* Handle insert of new range starting after old range */
3973 if (wr_mas->r_min < mas->index) {
3974 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
3975 dst_pivots[mas->offset++] = mas->index - 1;
3976 }
3977
3978 /* Store the new entry and range end. */
3979 if (mas->offset < node_pivots)
3980 dst_pivots[mas->offset] = mas->last;
3981 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
3982
3983 /*
3984 * this range wrote to the end of the node or it overwrote the rest of
3985 * the data
3986 */
3987 if (offset_end > wr_mas->node_end)
3988 goto done;
3989
3990 dst_offset = mas->offset + 1;
3991 /* Copy to the end of node if necessary. */
3992 copy_size = wr_mas->node_end - offset_end + 1;
3993 memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
3994 sizeof(void *) * copy_size);
3995 memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
3996 sizeof(unsigned long) * (copy_size - 1));
3997
3998 if (new_end < node_pivots)
3999 dst_pivots[new_end] = mas->max;
4000
4001 done:
4002 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4003 if (in_rcu) {
4004 struct maple_enode *old_enode = mas->node;
4005
4006 mas->node = mt_mk_node(newnode, wr_mas->type);
4007 mas_replace_node(mas, old_enode);
4008 } else {
4009 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4010 }
4011 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4012 mas_update_gap(mas);
4013 return true;
4014 }
4015
4016 /*
4017 * mas_wr_slot_store() - Attempt to store a value in a slot.
4018 * @wr_mas: the maple write state
4019 *
4020 * Return: True if stored, false otherwise
4021 */
4022 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4023 {
4024 struct ma_state *mas = wr_mas->mas;
4025 unsigned char offset = mas->offset;
4026 void __rcu **slots = wr_mas->slots;
4027 bool gap = false;
4028
4029 gap |= !mt_slot_locked(mas->tree, slots, offset);
4030 gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
4031
4032 if (wr_mas->offset_end - offset == 1) {
4033 if (mas->index == wr_mas->r_min) {
4034 /* Overwriting the range and a part of the next one */
4035 rcu_assign_pointer(slots[offset], wr_mas->entry);
4036 wr_mas->pivots[offset] = mas->last;
4037 } else {
4038 /* Overwriting a part of the range and the next one */
4039 rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4040 wr_mas->pivots[offset] = mas->index - 1;
4041 mas->offset++; /* Keep mas accurate. */
4042 }
4043 } else if (!mt_in_rcu(mas->tree)) {
4044 /*
4045 * Expand the range, only partially overwriting the previous and
4046 * next ranges
4047 */
4048 gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
4049 rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4050 wr_mas->pivots[offset] = mas->index - 1;
4051 wr_mas->pivots[offset + 1] = mas->last;
4052 mas->offset++; /* Keep mas accurate. */
4053 } else {
4054 return false;
4055 }
4056
4057 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4058 /*
4059 * Only update gap when the new entry is empty or there is an empty
4060 * entry in the original two ranges.
4061 */
4062 if (!wr_mas->entry || gap)
4063 mas_update_gap(mas);
4064
4065 return true;
4066 }
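
/*
 * Illustrative sketch (example values, not part of the kernel source):
 * with existing ranges A = [0, 9] and B = [10, 19] in adjacent slots, a
 * store of entry E over [5, 19] hits the "overwriting a part of the range
 * and the next one" case:
 *
 *    before: slots | A | B |    pivots | 9 | 19 |
 *    after:  slots | A | E |    pivots | 4 | 19 |
 *
 * Only one pivot and one slot are rewritten, so the node is reused in place.
 */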
4067
4068 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4069 {
4070 struct ma_state *mas = wr_mas->mas;
4071
4072 if (!wr_mas->slots[wr_mas->offset_end]) {
4073 /* If this one is null, the next and prev are not */
4074 mas->last = wr_mas->end_piv;
4075 } else {
4076 /* Check next slot(s) if we are overwriting the end */
4077 if ((mas->last == wr_mas->end_piv) &&
4078 (wr_mas->node_end != wr_mas->offset_end) &&
4079 !wr_mas->slots[wr_mas->offset_end + 1]) {
4080 wr_mas->offset_end++;
4081 if (wr_mas->offset_end == wr_mas->node_end)
4082 mas->last = mas->max;
4083 else
4084 mas->last = wr_mas->pivots[wr_mas->offset_end];
4085 wr_mas->end_piv = mas->last;
4086 }
4087 }
4088
4089 if (!wr_mas->content) {
4090 /* If this one is null, the next and prev are not */
4091 mas->index = wr_mas->r_min;
4092 } else {
4093 /* Check prev slot if we are overwriting the start */
4094 if (mas->index == wr_mas->r_min && mas->offset &&
4095 !wr_mas->slots[mas->offset - 1]) {
4096 mas->offset--;
4097 wr_mas->r_min = mas->index =
4098 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4099 wr_mas->r_max = wr_mas->pivots[mas->offset];
4100 }
4101 }
4102 }
4103
4104 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4105 {
4106 while ((wr_mas->offset_end < wr_mas->node_end) &&
4107 (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4108 wr_mas->offset_end++;
4109
4110 if (wr_mas->offset_end < wr_mas->node_end)
4111 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4112 else
4113 wr_mas->end_piv = wr_mas->mas->max;
4114
4115 if (!wr_mas->entry)
4116 mas_wr_extend_null(wr_mas);
4117 }
4118
4119 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
4120 {
4121 struct ma_state *mas = wr_mas->mas;
4122 unsigned char new_end = wr_mas->node_end + 2;
4123
4124 new_end -= wr_mas->offset_end - mas->offset;
4125 if (wr_mas->r_min == mas->index)
4126 new_end--;
4127
4128 if (wr_mas->end_piv == mas->last)
4129 new_end--;
4130
4131 return new_end;
4132 }
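
/*
 * Worked example (values for illustration only): a node with
 * node_end == 4 where the write covers offsets 1..2 (mas->offset == 1,
 * offset_end == 2), starts exactly on r_min, and ends before end_piv:
 *
 *    new_end = 4 + 2 - (2 - 1) = 5;  then -1 for r_min == index -> 4
 *
 * The write therefore fits without growing the node.
 */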
4133
4134 /*
4135 * mas_wr_append() - Attempt to append
4136 * @wr_mas: the maple write state
4137 * @new_end: The end of the node after the modification
4138 *
4139 * This is currently unsafe in RCU mode since the end of the node may be cached
4140 * by readers while the node contents are updated, which could result in
4141 * inaccurate information.
4142 *
4143 * Return: True if appended, false otherwise
4144 */
4145 static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
4146 unsigned char new_end)
4147 {
4148 struct ma_state *mas;
4149 void __rcu **slots;
4150 unsigned char end;
4151
4152 mas = wr_mas->mas;
4153 if (mt_in_rcu(mas->tree))
4154 return false;
4155
4159 end = wr_mas->node_end;
4160 if (mas->offset != end)
4161 return false;
4162
4163 if (new_end < mt_pivots[wr_mas->type]) {
4164 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4165 ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
4166 }
4167
4168 slots = wr_mas->slots;
4169 if (new_end == end + 1) {
4170 if (mas->last == wr_mas->r_max) {
4171 /* Append to end of range */
4172 rcu_assign_pointer(slots[new_end], wr_mas->entry);
4173 wr_mas->pivots[end] = mas->index - 1;
4174 mas->offset = new_end;
4175 } else {
4176 /* Append to start of range */
4177 rcu_assign_pointer(slots[new_end], wr_mas->content);
4178 wr_mas->pivots[end] = mas->last;
4179 rcu_assign_pointer(slots[end], wr_mas->entry);
4180 }
4181 } else {
4182 /* Append to the range without touching any boundaries. */
4183 rcu_assign_pointer(slots[new_end], wr_mas->content);
4184 wr_mas->pivots[end + 1] = mas->last;
4185 rcu_assign_pointer(slots[end + 1], wr_mas->entry);
4186 wr_mas->pivots[end] = mas->index - 1;
4187 mas->offset = end + 1;
4188 }
4189
4190 if (!wr_mas->content || !wr_mas->entry)
4191 mas_update_gap(mas);
4192
4193 trace_ma_write(__func__, mas, new_end, wr_mas->entry);
4194 return true;
4195 }
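
/*
 * Illustrative sketch (example values, not part of the kernel source):
 * with the last range of a node being [80, 100] (r_max == 100), a store of
 * [90, 100] takes the "append to end of range" case:
 *
 *    before: ... | old |            pivots ... | 100 |
 *    after:  ... | old | entry |    pivots ... | 89 | 100 |
 *
 * The old entry keeps [80, 89] and the new entry takes [90, 100], reusing
 * the node in place (non-RCU mode only).
 */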
4196
4197 /*
4198 * mas_wr_bnode() - Slow path for a modification.
4199 * @wr_mas: The write maple state
4200 *
4201 * This is where splits and rebalances end up.
4202 */
4203 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4204 {
4205 struct maple_big_node b_node;
4206
4207 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4208 memset(&b_node, 0, sizeof(struct maple_big_node));
4209 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4210 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4211 }
4212
4213 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4214 {
4215 struct ma_state *mas = wr_mas->mas;
4216 unsigned char new_end;
4217
4218 /* Direct replacement */
4219 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4220 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4221 if (!!wr_mas->entry ^ !!wr_mas->content)
4222 mas_update_gap(mas);
4223 return;
4224 }
4225
4226 /*
4227 * If new_end exceeds the size of the maple node, the write cannot take
4228 * the fast path and must go through the slow path.
4229 */
4230 new_end = mas_wr_new_end(wr_mas);
4231 if (new_end >= mt_slots[wr_mas->type])
4232 goto slow_path;
4233
4234 /* Attempt to append */
4235 if (mas_wr_append(wr_mas, new_end))
4236 return;
4237
4238 if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
4239 return;
4240
4241 if (mas_wr_node_store(wr_mas, new_end))
4242 return;
4243
4244 if (mas_is_err(mas))
4245 return;
4246
4247 slow_path:
4248 mas_wr_bnode(wr_mas);
4249 }
4250
4251 /*
4252 * mas_wr_store_entry() - Internal call to store a value
4253 * @wr_mas: The maple write state
4254 *
4255 * Return: The contents that were previously stored at the index.
4257 */
4258 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4259 {
4260 struct ma_state *mas = wr_mas->mas;
4261
4262 wr_mas->content = mas_start(mas);
4263 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4264 mas_store_root(mas, wr_mas->entry);
4265 return wr_mas->content;
4266 }
4267
4268 if (unlikely(!mas_wr_walk(wr_mas))) {
4269 mas_wr_spanning_store(wr_mas);
4270 return wr_mas->content;
4271 }
4272
4273 /* At this point, we are at the leaf node that needs to be altered. */
4274 mas_wr_end_piv(wr_mas);
4275 /* New root for a single pointer */
4276 if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4277 mas_new_root(mas, wr_mas->entry);
4278 return wr_mas->content;
4279 }
4280
4281 mas_wr_modify(wr_mas);
4282 return wr_mas->content;
4283 }
4284
4285 /**
4286 * mas_insert() - Internal call to insert a value
4287 * @mas: The maple state
4288 * @entry: The entry to store
4289 *
4290 * Return: %NULL or the contents that already exists at the requested index
4291 * otherwise. The maple state needs to be checked for error conditions.
4292 */
4293 static inline void *mas_insert(struct ma_state *mas, void *entry)
4294 {
4295 MA_WR_STATE(wr_mas, mas, entry);
4296
4297 /*
4298 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4299 * tree. If the insert fits exactly into an existing gap with a value
4300 * of NULL, then the slot only needs to be written with the new value.
4301 * If the range being inserted is adjacent to another range, then only a
4302 * single pivot needs to be inserted (as well as writing the entry). If
4303 * the new range is within a gap but does not touch any other ranges,
4304 * then two pivots need to be inserted: the start - 1, and the end. As
4305 * usual, the entry must be written. Most operations require a new node
4306 * to be allocated and replace an existing node to ensure RCU safety,
4307 * when in RCU mode. The exception to requiring a newly allocated node
4308 * is when inserting at the end of a node (appending). When done
4309 * carefully, appending can reuse the node in place.
4310 */
4311 wr_mas.content = mas_start(mas);
4312 if (wr_mas.content)
4313 goto exists;
4314
4315 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4316 mas_store_root(mas, entry);
4317 return NULL;
4318 }
4319
4320 /* spanning writes always overwrite something */
4321 if (!mas_wr_walk(&wr_mas))
4322 goto exists;
4323
4324 /* At this point, we are at the leaf node that needs to be altered. */
4325 wr_mas.offset_end = mas->offset;
4326 wr_mas.end_piv = wr_mas.r_max;
4327
4328 if (wr_mas.content || (mas->last > wr_mas.r_max))
4329 goto exists;
4330
4331 if (!entry)
4332 return NULL;
4333
4334 mas_wr_modify(&wr_mas);
4335 return wr_mas.content;
4336
4337 exists:
4338 mas_set_err(mas, -EEXIST);
4339 return wr_mas.content;
4340
4341 }
4342
4343 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4344 {
4345 retry:
4346 mas_set(mas, index);
4347 mas_state_walk(mas);
4348 if (mas_is_start(mas))
4349 goto retry;
4350 }
4351
4352 static inline bool mas_rewalk_if_dead(struct ma_state *mas,
4353 struct maple_node *node, const unsigned long index)
4354 {
4355 if (unlikely(ma_dead_node(node))) {
4356 mas_rewalk(mas, index);
4357 return true;
4358 }
4359 return false;
4360 }
4361
4362 /*
4363 * mas_prev_node() - Find the prev non-null entry at the same level in the
4364 * tree.
4365 * @mas: The maple state
4366 * @min: The lower limit to search
4367 *
4368 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4369 * Return: 1 if the node is dead, 0 otherwise.
4370 */
4371 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4372 {
4373 enum maple_type mt;
4374 int offset, level;
4375 void __rcu **slots;
4376 struct maple_node *node;
4377 unsigned long *pivots;
4378 unsigned long max;
4379
4380 node = mas_mn(mas);
4381 if (!mas->min)
4382 goto no_entry;
4383
4384 max = mas->min - 1;
4385 if (max < min)
4386 goto no_entry;
4387
4388 level = 0;
4389 do {
4390 if (ma_is_root(node))
4391 goto no_entry;
4392
4393 /* Walk up. */
4394 if (unlikely(mas_ascend(mas)))
4395 return 1;
4396 offset = mas->offset;
4397 level++;
4398 node = mas_mn(mas);
4399 } while (!offset);
4400
4401 offset--;
4402 mt = mte_node_type(mas->node);
4403 while (level > 1) {
4404 level--;
4405 slots = ma_slots(node, mt);
4406 mas->node = mas_slot(mas, slots, offset);
4407 if (unlikely(ma_dead_node(node)))
4408 return 1;
4409
4410 mt = mte_node_type(mas->node);
4411 node = mas_mn(mas);
4412 pivots = ma_pivots(node, mt);
4413 offset = ma_data_end(node, mt, pivots, max);
4414 if (unlikely(ma_dead_node(node)))
4415 return 1;
4416 }
4417
4418 slots = ma_slots(node, mt);
4419 mas->node = mas_slot(mas, slots, offset);
4420 pivots = ma_pivots(node, mt);
4421 if (unlikely(ma_dead_node(node)))
4422 return 1;
4423
4424 if (likely(offset))
4425 mas->min = pivots[offset - 1] + 1;
4426 mas->max = max;
4427 mas->offset = mas_data_end(mas);
4428 if (unlikely(mte_dead_node(mas->node)))
4429 return 1;
4430
4431 return 0;
4432
4433 no_entry:
4434 if (unlikely(ma_dead_node(node)))
4435 return 1;
4436
4437 mas->node = MAS_NONE;
4438 return 0;
4439 }
4440
4441 /*
4442 * mas_prev_slot() - Get the entry in the previous slot
4443 *
4444 * @mas: The maple state
4445 * @min: The minimum starting range
4446 * @empty: Can be empty
4447 * @set_underflow: Set the @mas->node to underflow state on limit.
4448 *
4449 * Return: The entry in the previous slot which is possibly NULL
4450 */
4451 static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
4452 bool set_underflow)
4453 {
4454 void *entry;
4455 void __rcu **slots;
4456 unsigned long pivot;
4457 enum maple_type type;
4458 unsigned long *pivots;
4459 struct maple_node *node;
4460 unsigned long save_point = mas->index;
4461
4462 retry:
4463 node = mas_mn(mas);
4464 type = mte_node_type(mas->node);
4465 pivots = ma_pivots(node, type);
4466 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4467 goto retry;
4468
4469 if (mas->min <= min) {
4470 pivot = mas_safe_min(mas, pivots, mas->offset);
4471
4472 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4473 goto retry;
4474
4475 if (pivot <= min)
4476 goto underflow;
4477 }
4478
4479 again:
4480 if (likely(mas->offset)) {
4481 mas->offset--;
4482 mas->last = mas->index - 1;
4483 mas->index = mas_safe_min(mas, pivots, mas->offset);
4484 } else {
4485 if (mas_prev_node(mas, min)) {
4486 mas_rewalk(mas, save_point);
4487 goto retry;
4488 }
4489
4490 if (mas_is_none(mas))
4491 goto underflow;
4492
4493 mas->last = mas->max;
4494 node = mas_mn(mas);
4495 type = mte_node_type(mas->node);
4496 pivots = ma_pivots(node, type);
4497 mas->index = pivots[mas->offset - 1] + 1;
4498 }
4499
4500 slots = ma_slots(node, type);
4501 entry = mas_slot(mas, slots, mas->offset);
4502 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4503 goto retry;
4504
4505 if (likely(entry))
4506 return entry;
4507
4508 if (!empty) {
4509 if (mas->index <= min)
4510 goto underflow;
4511
4512 goto again;
4513 }
4514
4515 return entry;
4516
4517 underflow:
4518 if (set_underflow)
4519 mas->node = MAS_UNDERFLOW;
4520 return NULL;
4521 }
4522
4523 /*
4524 * mas_next_node() - Get the next node at the same level in the tree.
4525 * @mas: The maple state
4526 * @max: The maximum pivot value to check.
4527 *
4528 * The next value will be mas->node[mas->offset] or MAS_NONE.
4529 * Return: 1 on dead node, 0 otherwise.
4530 */
4531 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4532 unsigned long max)
4533 {
4534 unsigned long min;
4535 unsigned long *pivots;
4536 struct maple_enode *enode;
4537 int level = 0;
4538 unsigned char node_end;
4539 enum maple_type mt;
4540 void __rcu **slots;
4541
4542 if (mas->max >= max)
4543 goto no_entry;
4544
4545 min = mas->max + 1;
4546 level = 0;
4547 do {
4548 if (ma_is_root(node))
4549 goto no_entry;
4550
4551 /* Walk up. */
4552 if (unlikely(mas_ascend(mas)))
4553 return 1;
4554
4555 level++;
4556 node = mas_mn(mas);
4557 mt = mte_node_type(mas->node);
4558 pivots = ma_pivots(node, mt);
4559 node_end = ma_data_end(node, mt, pivots, mas->max);
4560 if (unlikely(ma_dead_node(node)))
4561 return 1;
4562
4563 } while (unlikely(mas->offset == node_end));
4564
4565 slots = ma_slots(node, mt);
4566 mas->offset++;
4567 enode = mas_slot(mas, slots, mas->offset);
4568 if (unlikely(ma_dead_node(node)))
4569 return 1;
4570
4571 if (level > 1)
4572 mas->offset = 0;
4573
4574 while (unlikely(level > 1)) {
4575 level--;
4576 mas->node = enode;
4577 node = mas_mn(mas);
4578 mt = mte_node_type(mas->node);
4579 slots = ma_slots(node, mt);
4580 enode = mas_slot(mas, slots, 0);
4581 if (unlikely(ma_dead_node(node)))
4582 return 1;
4583 }
4584
4585 if (!mas->offset)
4586 pivots = ma_pivots(node, mt);
4587
4588 mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
4589 if (unlikely(ma_dead_node(node)))
4590 return 1;
4591
4592 mas->node = enode;
4593 mas->min = min;
4594 return 0;
4595
4596 no_entry:
4597 if (unlikely(ma_dead_node(node)))
4598 return 1;
4599
4600 mas->node = MAS_NONE;
4601 return 0;
4602 }
4603
4604 /*
4605 * mas_next_slot() - Get the entry in the next slot
4606 *
4607 * @mas: The maple state
4608 * @max: The maximum starting range
4609 * @empty: Can be empty
4610 * @set_overflow: Should @mas->node be set to overflow when the limit is
4611 * reached.
4612 *
4613 * Return: The entry in the next slot which is possibly NULL
4614 */
4615 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty,
4616 bool set_overflow)
4617 {
4618 void __rcu **slots;
4619 unsigned long *pivots;
4620 unsigned long pivot;
4621 enum maple_type type;
4622 struct maple_node *node;
4623 unsigned char data_end;
4624 unsigned long save_point = mas->last;
4625 void *entry;
4626
4627 retry:
4628 node = mas_mn(mas);
4629 type = mte_node_type(mas->node);
4630 pivots = ma_pivots(node, type);
4631 data_end = ma_data_end(node, type, pivots, mas->max);
4632 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4633 goto retry;
4634
4635 if (mas->max >= max) {
4636 if (likely(mas->offset < data_end))
4637 pivot = pivots[mas->offset];
4638 else
4639 goto overflow;
4640
4641 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4642 goto retry;
4643
4644 if (pivot >= max)
4645 goto overflow;
4646 }
4647
4648 if (likely(mas->offset < data_end)) {
4649 mas->index = pivots[mas->offset] + 1;
4650 again:
4651 mas->offset++;
4652 if (likely(mas->offset < data_end))
4653 mas->last = pivots[mas->offset];
4654 else
4655 mas->last = mas->max;
4656 } else {
4657 if (mas_next_node(mas, node, max)) {
4658 mas_rewalk(mas, save_point);
4659 goto retry;
4660 }
4661
4662 if (WARN_ON_ONCE(mas_is_none(mas))) {
4663 mas->node = MAS_OVERFLOW;
4664 return NULL;
4665 }
4667
4668 mas->offset = 0;
4669 mas->index = mas->min;
4670 node = mas_mn(mas);
4671 type = mte_node_type(mas->node);
4672 pivots = ma_pivots(node, type);
4673 mas->last = pivots[0];
4674 }
4675
4676 slots = ma_slots(node, type);
4677 entry = mt_slot(mas->tree, slots, mas->offset);
4678 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4679 goto retry;
4680
4681 if (entry)
4682 return entry;
4683
4684 if (!empty) {
4685 if (mas->last >= max)
4686 goto overflow;
4687
4688 mas->index = mas->last + 1;
4689 /* Node cannot end on NULL, so it's safe to short-cut here */
4690 goto again;
4691 }
4692
4693 return entry;
4694
4695 overflow:
4696 if (set_overflow)
4697 mas->node = MAS_OVERFLOW;
4698 return NULL;
4699 }
4700
4701 /*
4702 * mas_next_entry() - Internal function to get the next entry.
4703 * @mas: The maple state
4704 * @limit: The maximum range start.
4705 *
4706 * Set the @mas->node to the next entry and the range_start to
4707 * the beginning value for the entry. Does not check beyond @limit.
4708 * Sets @mas->index and @mas->last to the range. Does not update @mas->index and
4709 * @mas->last on overflow.
4710 * Restarts on dead nodes.
4711 *
4712 * Return: the next entry or %NULL.
4713 */
4714 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4715 {
4716 if (mas->last >= limit) {
4717 mas->node = MAS_OVERFLOW;
4718 return NULL;
4719 }
4720
4721 return mas_next_slot(mas, limit, false, true);
4722 }
4723
4724 /*
4725 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4726 * highest gap address of a given size in a given node and descend.
4727 * @mas: The maple state
4728 * @size: The needed size.
4729 *
4730 * Return: True if found in a leaf, false otherwise.
4731 *
4732 */
4733 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4734 unsigned long *gap_min, unsigned long *gap_max)
4735 {
4736 enum maple_type type = mte_node_type(mas->node);
4737 struct maple_node *node = mas_mn(mas);
4738 unsigned long *pivots, *gaps;
4739 void __rcu **slots;
4740 unsigned long gap = 0;
4741 unsigned long max, min;
4742 unsigned char offset;
4743
4744 if (unlikely(mas_is_err(mas)))
4745 return true;
4746
4747 if (ma_is_dense(type)) {
4748 /* dense nodes. */
4749 mas->offset = (unsigned char)(mas->index - mas->min);
4750 return true;
4751 }
4752
4753 pivots = ma_pivots(node, type);
4754 slots = ma_slots(node, type);
4755 gaps = ma_gaps(node, type);
4756 offset = mas->offset;
4757 min = mas_safe_min(mas, pivots, offset);
4758 /* Skip out of bounds. */
4759 while (mas->last < min)
4760 min = mas_safe_min(mas, pivots, --offset);
4761
4762 max = mas_safe_pivot(mas, pivots, offset, type);
4763 while (mas->index <= max) {
4764 gap = 0;
4765 if (gaps)
4766 gap = gaps[offset];
4767 else if (!mas_slot(mas, slots, offset))
4768 gap = max - min + 1;
4769
4770 if (gap) {
4771 if ((size <= gap) && (size <= mas->last - min + 1))
4772 break;
4773
4774 if (!gaps) {
4775 /* Skip the next slot, it cannot be a gap. */
4776 if (offset < 2)
4777 goto ascend;
4778
4779 offset -= 2;
4780 max = pivots[offset];
4781 min = mas_safe_min(mas, pivots, offset);
4782 continue;
4783 }
4784 }
4785
4786 if (!offset)
4787 goto ascend;
4788
4789 offset--;
4790 max = min - 1;
4791 min = mas_safe_min(mas, pivots, offset);
4792 }
4793
4794 if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4795 goto no_space;
4796
4797 if (unlikely(ma_is_leaf(type))) {
4798 mas->offset = offset;
4799 *gap_min = min;
4800 *gap_max = min + gap - 1;
4801 return true;
4802 }
4803
4804 /* descend, only happens under lock. */
4805 mas->node = mas_slot(mas, slots, offset);
4806 mas->min = min;
4807 mas->max = max;
4808 mas->offset = mas_data_end(mas);
4809 return false;
4810
4811 ascend:
4812 if (!mte_is_root(mas->node))
4813 return false;
4814
4815 no_space:
4816 mas_set_err(mas, -EBUSY);
4817 return false;
4818 }
4819
4820 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
4821 {
4822 enum maple_type type = mte_node_type(mas->node);
4823 unsigned long pivot, min, gap = 0;
4824 unsigned char offset, data_end;
4825 unsigned long *gaps, *pivots;
4826 void __rcu **slots;
4827 struct maple_node *node;
4828 bool found = false;
4829
4830 if (ma_is_dense(type)) {
4831 mas->offset = (unsigned char)(mas->index - mas->min);
4832 return true;
4833 }
4834
4835 node = mas_mn(mas);
4836 pivots = ma_pivots(node, type);
4837 slots = ma_slots(node, type);
4838 gaps = ma_gaps(node, type);
4839 offset = mas->offset;
4840 min = mas_safe_min(mas, pivots, offset);
4841 data_end = ma_data_end(node, type, pivots, mas->max);
4842 for (; offset <= data_end; offset++) {
4843 pivot = mas_safe_pivot(mas, pivots, offset, type);
4844
4845 /* Not within lower bounds */
4846 if (mas->index > pivot)
4847 goto next_slot;
4848
4849 if (gaps)
4850 gap = gaps[offset];
4851 else if (!mas_slot(mas, slots, offset))
4852 gap = min(pivot, mas->last) - max(mas->index, min) + 1;
4853 else
4854 goto next_slot;
4855
4856 if (gap >= size) {
4857 if (ma_is_leaf(type)) {
4858 found = true;
4859 goto done;
4860 }
4861 if (mas->index <= pivot) {
4862 mas->node = mas_slot(mas, slots, offset);
4863 mas->min = min;
4864 mas->max = pivot;
4865 offset = 0;
4866 break;
4867 }
4868 }
4869 next_slot:
4870 min = pivot + 1;
4871 if (mas->last <= pivot) {
4872 mas_set_err(mas, -EBUSY);
4873 return true;
4874 }
4875 }
4876
4877 if (mte_is_root(mas->node))
4878 found = true;
4879 done:
4880 mas->offset = offset;
4881 return found;
4882 }
4883
4884 /**
4885 * mas_walk() - Search for @mas->index in the tree.
4886 * @mas: The maple state.
4887 *
4888 * mas->index and mas->last will be set to the range if there is a value. If
4889 * mas->node is MAS_NONE, reset to MAS_START.
4890 *
4891 * Return: the entry at the location or %NULL.
4892 */
4893 void *mas_walk(struct ma_state *mas)
4894 {
4895 void *entry;
4896
4897 if (!mas_is_active(mas) || !mas_is_start(mas))
4898 mas->node = MAS_START;
4899 retry:
4900 entry = mas_state_walk(mas);
4901 if (mas_is_start(mas)) {
4902 goto retry;
4903 } else if (mas_is_none(mas)) {
4904 mas->index = 0;
4905 mas->last = ULONG_MAX;
4906 } else if (mas_is_ptr(mas)) {
4907 if (!mas->index) {
4908 mas->last = 0;
4909 return entry;
4910 }
4911
4912 mas->index = 1;
4913 mas->last = ULONG_MAX;
4914 mas->node = MAS_NONE;
4915 return NULL;
4916 }
4917
4918 return entry;
4919 }
4920 EXPORT_SYMBOL_GPL(mas_walk);
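
/*
 * Example usage (an illustrative sketch, not kernel source; assumes @tree is
 * an already-populated maple tree):
 *
 *    MA_STATE(mas, &tree, 12, 12);
 *    void *entry;
 *
 *    rcu_read_lock();
 *    entry = mas_walk(&mas);
 *    rcu_read_unlock();
 *
 * On return, mas.index and mas.last hold the range containing index 12.
 */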
4921
4922 static inline bool mas_rewind_node(struct ma_state *mas)
4923 {
4924 unsigned char slot;
4925
4926 do {
4927 if (mte_is_root(mas->node)) {
4928 slot = mas->offset;
4929 if (!slot)
4930 return false;
4931 } else {
4932 mas_ascend(mas);
4933 slot = mas->offset;
4934 }
4935 } while (!slot);
4936
4937 mas->offset = --slot;
4938 return true;
4939 }
4940
4941 /*
4942 * mas_skip_node() - Internal function. Skip over a node.
4943 * @mas: The maple state.
4944 *
4945 * Return: true if there is another node, false otherwise.
4946 */
4947 static inline bool mas_skip_node(struct ma_state *mas)
4948 {
4949 if (mas_is_err(mas))
4950 return false;
4951
4952 do {
4953 if (mte_is_root(mas->node)) {
4954 if (mas->offset >= mas_data_end(mas)) {
4955 mas_set_err(mas, -EBUSY);
4956 return false;
4957 }
4958 } else {
4959 mas_ascend(mas);
4960 }
4961 } while (mas->offset >= mas_data_end(mas));
4962
4963 mas->offset++;
4964 return true;
4965 }
4966
4967 /*
4968 * mas_awalk() - Allocation walk. Search from low address to high, for a gap of
4969 * @size
4970 * @mas: The maple state
4971 * @size: The size of the gap required
4972 *
4973 * Search between @mas->index and @mas->last for a gap of @size.
4974 */
4975 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
4976 {
4977 struct maple_enode *last = NULL;
4978
4979 /*
4980 * There are 4 options:
4981 * go to child (descend)
4982 * go back to parent (ascend)
4983 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
4984 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
4985 */
4986 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
4987 if (last == mas->node)
4988 mas_skip_node(mas);
4989 else
4990 last = mas->node;
4991 }
4992 }
4993
4994 /*
4995 * mas_sparse_area() - Internal function. Return upper or lower limit when
4996 * searching for a gap in an empty tree.
4997 * @mas: The maple state
4998 * @min: the minimum range
4999 * @max: The maximum range
5000 * @size: The size of the gap
5001 * @fwd: Searching forward or back
5002 */
5003 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5004 unsigned long max, unsigned long size, bool fwd)
5005 {
5006 if (likely(!mas_is_none(mas)) && min == 0) {
5007 min++;
5008 /*
5009 * min has been increased, so recheck whether the requested size
5010 * still fits.
5011 */
5012 if (min > max || max - min + 1 < size)
5013 return -EBUSY;
5014 }
5015 /* mas_is_ptr */
5016
5017 if (fwd) {
5018 mas->index = min;
5019 mas->last = min + size - 1;
5020 } else {
5021 mas->last = max;
5022 mas->index = max - size + 1;
5023 }
5024 return 0;
5025 }
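
/*
 * Worked example (illustrative values): with min == 0, max == 100 and
 * size == 16, a tree holding only a direct root pointer (mas_is_ptr) has
 * index 0 occupied, so min is bumped to 1 and a forward search yields
 * index == 1, last == 16. For a completely empty tree (mas_is_none), min
 * stays 0 and the forward search yields index == 0, last == 15.
 */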
5026
5027 /*
5028 * mas_empty_area() - Get the lowest address within the range that is
5029 * sufficient for the size requested.
5030 * @mas: The maple state
5031 * @min: The lowest value of the range
5032 * @max: The highest value of the range
5033 * @size: The size needed
5034 */
5035 int mas_empty_area(struct ma_state *mas, unsigned long min,
5036 unsigned long max, unsigned long size)
5037 {
5038 unsigned char offset;
5039 unsigned long *pivots;
5040 enum maple_type mt;
5041
5042 if (min > max)
5043 return -EINVAL;
5044
5045 if (size == 0 || max - min < size - 1)
5046 return -EINVAL;
5047
5048 if (mas_is_start(mas))
5049 mas_start(mas);
5050 else if (mas->offset >= 2)
5051 mas->offset -= 2;
5052 else if (!mas_skip_node(mas))
5053 return -EBUSY;
5054
5055 /* Empty set */
5056 if (mas_is_none(mas) || mas_is_ptr(mas))
5057 return mas_sparse_area(mas, min, max, size, true);
5058
5059 /* The start of the window can only be within these values */
5060 mas->index = min;
5061 mas->last = max;
5062 mas_awalk(mas, size);
5063
5064 if (unlikely(mas_is_err(mas)))
5065 return xa_err(mas->node);
5066
5067 offset = mas->offset;
5068 if (unlikely(offset == MAPLE_NODE_SLOTS))
5069 return -EBUSY;
5070
5071 mt = mte_node_type(mas->node);
5072 pivots = ma_pivots(mas_mn(mas), mt);
5073 min = mas_safe_min(mas, pivots, offset);
5074 if (mas->index < min)
5075 mas->index = min;
5076 mas->last = mas->index + size - 1;
5077 return 0;
5078 }
5079 EXPORT_SYMBOL_GPL(mas_empty_area);
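
/*
 * Example usage (an illustrative sketch, not kernel source): find the lowest
 * 16-slot gap anywhere in @tree:
 *
 *    MA_STATE(mas, &tree, 0, 0);
 *
 *    mas_lock(&mas);
 *    if (!mas_empty_area(&mas, 0, ULONG_MAX, 16))
 *        pr_debug("gap at [%lu, %lu]\n", mas.index, mas.last);
 *    mas_unlock(&mas);
 *
 * Note the gap is not reserved until something is stored into it.
 */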
5080
5081 /*
5082 * mas_empty_area_rev() - Get the highest address within the range that is
5083 * sufficient for the size requested.
5084 * @mas: The maple state
5085 * @min: The lowest value of the range
5086 * @max: The highest value of the range
5087 * @size: The size needed
5088 */
5089 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5090 unsigned long max, unsigned long size)
5091 {
5092 struct maple_enode *last = mas->node;
5093
5094 if (min > max)
5095 return -EINVAL;
5096
5097 if (size == 0 || max - min < size - 1)
5098 return -EINVAL;
5099
5100 if (mas_is_start(mas))
5101 mas_start(mas);
5102 else if ((mas->offset < 2) && (!mas_rewind_node(mas)))
5103 return -EBUSY;
5104
5105 if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
5106 return mas_sparse_area(mas, min, max, size, false);
5107 else if (mas->offset >= 2)
5108 mas->offset -= 2;
5109 else
5110 mas->offset = mas_data_end(mas);
5111
5113 /* The start of the window can only be within these values. */
5114 mas->index = min;
5115 mas->last = max;
5116
5117 while (!mas_rev_awalk(mas, size, &min, &max)) {
5118 if (last == mas->node) {
5119 if (!mas_rewind_node(mas))
5120 return -EBUSY;
5121 } else {
5122 last = mas->node;
5123 }
5124 }
5125
5126 if (mas_is_err(mas))
5127 return xa_err(mas->node);
5128
5129 if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5130 return -EBUSY;
5131
5132 /* Trim the upper limit to the max. */
5133 if (max < mas->last)
5134 mas->last = max;
5135
5136 mas->index = mas->last - size + 1;
5137 return 0;
5138 }
5139 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5140
5141 /*
5142 * mte_dead_leaves() - Mark all leaves of a node as dead.
5143 * @enode: The encoded maple node
5144 * @mt: The maple tree
5145 * @slots: Pointer to the slot array
5146 *
5147 * Must hold the write lock.
5148 *
5149 * Return: The number of leaves marked as dead.
5150 */
5151 static inline
5152 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5153 void __rcu **slots)
5154 {
5155 struct maple_node *node;
5156 enum maple_type type;
5157 void *entry;
5158 int offset;
5159
5160 for (offset = 0; offset < mt_slot_count(enode); offset++) {
5161 entry = mt_slot(mt, slots, offset);
5162 type = mte_node_type(entry);
5163 node = mte_to_node(entry);
5164 /* Use both node and type to catch LE & BE metadata */
5165 if (!node || !type)
5166 break;
5167
5168 mte_set_node_dead(entry);
5169 node->type = type;
5170 rcu_assign_pointer(slots[offset], node);
5171 }
5172
5173 return offset;
5174 }
5175
5176 /**
5177 * mte_dead_walk() - Walk down a dead tree to just before the leaves
5178 * @enode: The maple encoded node
5179 * @offset: The starting offset
5180 *
5181 * Note: This can only be used from the RCU callback context.
5182 */
5183 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5184 {
5185 struct maple_node *node, *next;
5186 void __rcu **slots = NULL;
5187
5188 next = mte_to_node(*enode);
5189 do {
5190 *enode = ma_enode_ptr(next);
5191 node = mte_to_node(*enode);
5192 slots = ma_slots(node, node->type);
5193 next = rcu_dereference_protected(slots[offset],
5194 lock_is_held(&rcu_callback_map));
5195 offset = 0;
5196 } while (!ma_is_leaf(next->type));
5197
5198 return slots;
5199 }
5200
5201 /**
5202 * mt_free_walk() - Walk & free a tree in the RCU callback context
5203 * @head: The RCU head that's within the node.
5204 *
5205 * Note: This can only be used from the RCU callback context.
5206 */
5207 static void mt_free_walk(struct rcu_head *head)
5208 {
5209 void __rcu **slots;
5210 struct maple_node *node, *start;
5211 struct maple_enode *enode;
5212 unsigned char offset;
5213 enum maple_type type;
5214
5215 node = container_of(head, struct maple_node, rcu);
5216
5217 if (ma_is_leaf(node->type))
5218 goto free_leaf;
5219
5220 start = node;
5221 enode = mt_mk_node(node, node->type);
5222 slots = mte_dead_walk(&enode, 0);
5223 node = mte_to_node(enode);
5224 do {
5225 mt_free_bulk(node->slot_len, slots);
5226 offset = node->parent_slot + 1;
5227 enode = node->piv_parent;
5228 if (mte_to_node(enode) == node)
5229 goto free_leaf;
5230
5231 type = mte_node_type(enode);
5232 slots = ma_slots(mte_to_node(enode), type);
5233 if ((offset < mt_slots[type]) &&
5234 rcu_dereference_protected(slots[offset],
5235 lock_is_held(&rcu_callback_map)))
5236 slots = mte_dead_walk(&enode, offset);
5237 node = mte_to_node(enode);
5238 } while ((node != start) || (node->slot_len < offset));
5239
5240 slots = ma_slots(node, node->type);
5241 mt_free_bulk(node->slot_len, slots);
5242
5243 free_leaf:
5244 mt_free_rcu(&node->rcu);
5245 }
5246
5247 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5248 struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5249 {
5250 struct maple_node *node;
5251 struct maple_enode *next = *enode;
5252 void __rcu **slots = NULL;
5253 enum maple_type type;
5254 unsigned char next_offset = 0;
5255
5256 do {
5257 *enode = next;
5258 node = mte_to_node(*enode);
5259 type = mte_node_type(*enode);
5260 slots = ma_slots(node, type);
5261 next = mt_slot_locked(mt, slots, next_offset);
5262 if ((mte_dead_node(next)))
5263 next = mt_slot_locked(mt, slots, ++next_offset);
5264
5265 mte_set_node_dead(*enode);
5266 node->type = type;
5267 node->piv_parent = prev;
5268 node->parent_slot = offset;
5269 offset = next_offset;
5270 next_offset = 0;
5271 prev = *enode;
5272 } while (!mte_is_leaf(next));
5273
5274 return slots;
5275 }
5276
5277 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5278 bool free)
5279 {
5280 void __rcu **slots;
5281 struct maple_node *node = mte_to_node(enode);
5282 struct maple_enode *start;
5283
5284 if (mte_is_leaf(enode)) {
5285 node->type = mte_node_type(enode);
5286 goto free_leaf;
5287 }
5288
5289 start = enode;
5290 slots = mte_destroy_descend(&enode, mt, start, 0);
5291 node = mte_to_node(enode); /* Updated in the above call. */
5292 do {
5293 enum maple_type type;
5294 unsigned char offset;
5295 struct maple_enode *parent, *tmp;
5296
5297 node->slot_len = mte_dead_leaves(enode, mt, slots);
5298 if (free)
5299 mt_free_bulk(node->slot_len, slots);
5300 offset = node->parent_slot + 1;
5301 enode = node->piv_parent;
5302 if (mte_to_node(enode) == node)
5303 goto free_leaf;
5304
5305 type = mte_node_type(enode);
5306 slots = ma_slots(mte_to_node(enode), type);
5307 if (offset >= mt_slots[type])
5308 goto next;
5309
5310 tmp = mt_slot_locked(mt, slots, offset);
5311 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5312 parent = enode;
5313 enode = tmp;
5314 slots = mte_destroy_descend(&enode, mt, parent, offset);
5315 }
5316 next:
5317 node = mte_to_node(enode);
5318 } while (start != enode);
5319
5320 node = mte_to_node(enode);
5321 node->slot_len = mte_dead_leaves(enode, mt, slots);
5322 if (free)
5323 mt_free_bulk(node->slot_len, slots);
5324
5325 free_leaf:
5326 if (free)
5327 mt_free_rcu(&node->rcu);
5328 else
5329 mt_clear_meta(mt, node, node->type);
5330 }
5331
5332 /*
5333 * mte_destroy_walk() - Free a tree or sub-tree.
5334 * @enode: the encoded maple node (maple_enode) to start
5335 * @mt: the tree to free - needed for node types.
5336 *
5337 * Must hold the write lock.
5338 */
5339 static inline void mte_destroy_walk(struct maple_enode *enode,
5340 struct maple_tree *mt)
5341 {
5342 struct maple_node *node = mte_to_node(enode);
5343
5344 if (mt_in_rcu(mt)) {
5345 mt_destroy_walk(enode, mt, false);
5346 call_rcu(&node->rcu, mt_free_walk);
5347 } else {
5348 mt_destroy_walk(enode, mt, true);
5349 }
5350 }
5351
5352 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5353 {
5354 if (!mas_is_active(wr_mas->mas)) {
5355 if (mas_is_start(wr_mas->mas))
5356 return;
5357
5358 if (unlikely(mas_is_paused(wr_mas->mas)))
5359 goto reset;
5360
5361 if (unlikely(mas_is_none(wr_mas->mas)))
5362 goto reset;
5363
5364 if (unlikely(mas_is_overflow(wr_mas->mas)))
5365 goto reset;
5366
5367 if (unlikely(mas_is_underflow(wr_mas->mas)))
5368 goto reset;
5369 }
5370
5371 /*
5372 * A less strict version of mas_is_span_wr() where we allow spanning
5373 * writes within this node. This is to stop partial walks in
5374 * mas_preallocate() from being reset.
5375 */
5376 if (wr_mas->mas->last > wr_mas->mas->max)
5377 goto reset;
5378
5379 if (wr_mas->entry)
5380 return;
5381
5382 if (mte_is_leaf(wr_mas->mas->node) &&
5383 wr_mas->mas->last == wr_mas->mas->max)
5384 goto reset;
5385
5386 return;
5387
5388 reset:
5389 mas_reset(wr_mas->mas);
5390 }
5391
5392 /* Interface */
5393
5394 /**
5395 * mas_store() - Store an @entry.
5396 * @mas: The maple state.
5397 * @entry: The entry to store.
5398 *
5399 * The @mas->index and @mas->last are used to set the range for the @entry.
5400 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5401 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5402 *
5403 * Return: the first entry between mas->index and mas->last or %NULL.
5404 */
5405 void *mas_store(struct ma_state *mas, void *entry)
5406 {
5407 MA_WR_STATE(wr_mas, mas, entry);
5408
5409 trace_ma_write(__func__, mas, 0, entry);
5410 #ifdef CONFIG_DEBUG_MAPLE_TREE
5411 if (MAS_WARN_ON(mas, mas->index > mas->last))
5412 pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
5413
5414 if (mas->index > mas->last) {
5415 mas_set_err(mas, -EINVAL);
5416 return NULL;
5417 }
5418
5419 #endif
5420
5421 /*
5422 * Storing is the same operation as insert with the added caveat that it
5423 * can overwrite entries. Although this seems simple enough, one may
5424 * want to examine what happens if a single store operation was to
5425 * overwrite multiple entries within a self-balancing B-Tree.
5426 */
5427 mas_wr_store_setup(&wr_mas);
5428 mas_wr_store_entry(&wr_mas);
5429 return wr_mas.content;
5430 }
5431 EXPORT_SYMBOL_GPL(mas_store);
5432
5433 /**
5434 * mas_store_gfp() - Store a value into the tree.
5435 * @mas: The maple state
5436 * @entry: The entry to store
5437 * @gfp: The GFP_FLAGS to use for allocations if necessary.
5438 *
5439 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5440 * be allocated.
5441 */
5442 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5443 {
5444 MA_WR_STATE(wr_mas, mas, entry);
5445
5446 mas_wr_store_setup(&wr_mas);
5447 trace_ma_write(__func__, mas, 0, entry);
5448 retry:
5449 mas_wr_store_entry(&wr_mas);
5450 if (unlikely(mas_nomem(mas, gfp)))
5451 goto retry;
5452
5453 if (unlikely(mas_is_err(mas)))
5454 return xa_err(mas->node);
5455
5456 return 0;
5457 }
5458 EXPORT_SYMBOL_GPL(mas_store_gfp);
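
/*
 * Hedged example of the mas_store_gfp() pattern; "tree" and "entry" are
 * placeholders.  Unlike mas_store(), allocation failures are retried
 * internally (mas_nomem() may drop and retake the lock) and surface as a
 * plain error code:
 *
 *	MA_STATE(mas, &tree, index, index);
 *	int err;
 *
 *	mas_lock(&mas);
 *	err = mas_store_gfp(&mas, entry, GFP_KERNEL);
 *	mas_unlock(&mas);
 *	if (err)
 *		return err;
 */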
5459
5460 /**
5461 * mas_store_prealloc() - Store a value into the tree using memory
5462 * preallocated in the maple state.
5463 * @mas: The maple state
5464 * @entry: The entry to store.
5465 */
5466 void mas_store_prealloc(struct ma_state *mas, void *entry)
5467 {
5468 MA_WR_STATE(wr_mas, mas, entry);
5469
5470 mas_wr_store_setup(&wr_mas);
5471 trace_ma_write(__func__, mas, 0, entry);
5472 mas_wr_store_entry(&wr_mas);
5473 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
5474 mas_destroy(mas);
5475 }
5476 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5477
5478 /**
5479 * mas_preallocate() - Preallocate enough nodes for a store operation
5480 * @mas: The maple state
5481 * @entry: The entry that will be stored
5482 * @gfp: The GFP_FLAGS to use for allocations.
5483 *
5484 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5485 */
5486 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5487 {
5488 MA_WR_STATE(wr_mas, mas, entry);
5489 unsigned char node_size;
5490 int request = 1;
5491 int ret;
5492
5493
5494 if (unlikely(!mas->index && mas->last == ULONG_MAX))
5495 goto ask_now;
5496
5497 mas_wr_store_setup(&wr_mas);
5498 wr_mas.content = mas_start(mas);
5499 /* Root expand */
5500 if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
5501 goto ask_now;
5502
5503 if (unlikely(!mas_wr_walk(&wr_mas))) {
5504 /* Spanning store, use worst case for now */
5505 request = 1 + mas_mt_height(mas) * 3;
5506 goto ask_now;
5507 }
5508
5509 /* At this point, we are at the leaf node that needs to be altered. */
5510 /* Exact fit, no nodes needed. */
5511 if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
5512 return 0;
5513
5514 mas_wr_end_piv(&wr_mas);
5515 node_size = mas_wr_new_end(&wr_mas);
5516
5517 /* Slot store, does not require additional nodes */
5518 if (node_size == wr_mas.node_end) {
5519 /* reuse node */
5520 if (!mt_in_rcu(mas->tree))
5521 return 0;
5522 /* shifting boundary */
5523 if (wr_mas.offset_end - mas->offset == 1)
5524 return 0;
5525 }
5526
5527 if (node_size >= mt_slots[wr_mas.type]) {
5528 /* Split, worst case for now. */
5529 request = 1 + mas_mt_height(mas) * 2;
5530 goto ask_now;
5531 }
5532
5533 /* New root needs a single node */
5534 if (unlikely(mte_is_root(mas->node)))
5535 goto ask_now;
5536
5537 /* Potential spanning rebalance collapsing a node, use worst-case */
5538 if (node_size - 1 <= mt_min_slots[wr_mas.type])
5539 request = mas_mt_height(mas) * 2 - 1;
5540
5541 /* node store, slot store needs one node */
5542 ask_now:
5543 mas_node_count_gfp(mas, request, gfp);
5544 mas->mas_flags |= MA_STATE_PREALLOC;
5545 if (likely(!mas_is_err(mas)))
5546 return 0;
5547
5548 mas_set_alloc_req(mas, 0);
5549 ret = xa_err(mas->node);
5550 mas_reset(mas);
5551 mas_destroy(mas);
5552 mas_reset(mas);
5553 return ret;
5554 }
5555 EXPORT_SYMBOL_GPL(mas_preallocate);
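
/*
 * Sketch of the preallocation pattern this function supports (names are
 * assumed, not copied from a specific caller).  Preallocating up front
 * makes the later store unable to fail, which matters when the store must
 * happen past a point of no return:
 *
 *	if (mas_preallocate(&mas, entry, GFP_KERNEL))
 *		return -ENOMEM;
 *	...	point of no return; nothing below may fail	...
 *	mas_store_prealloc(&mas, entry);
 */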
5556
5557 /*
5558 * mas_destroy() - destroy a maple state.
5559 * @mas: The maple state
5560 *
5561 * Upon completion, check the left-most node and rebalance against the node to
5562 * the right if necessary. Frees any allocated nodes associated with this maple
5563 * state.
5564 */
5565 void mas_destroy(struct ma_state *mas)
5566 {
5567 struct maple_alloc *node;
5568 unsigned long total;
5569
5570 /*
5571 * When using mas_for_each() to insert an expected number of elements,
5572 * it is possible that the number inserted is less than the expected
5573 * number. To fix an invalid final node, a check is performed here to
5574 * rebalance the previous node with the final node.
5575 */
5576 if (mas->mas_flags & MA_STATE_REBALANCE) {
5577 unsigned char end;
5578
5579 mas_start(mas);
5580 mtree_range_walk(mas);
5581 end = mas_data_end(mas) + 1;
5582 if (end < mt_min_slot_count(mas->node) - 1)
5583 mas_destroy_rebalance(mas, end);
5584
5585 mas->mas_flags &= ~MA_STATE_REBALANCE;
5586 }
5587 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5588
5589 total = mas_allocated(mas);
5590 while (total) {
5591 node = mas->alloc;
5592 mas->alloc = node->slot[0];
5593 if (node->node_count > 1) {
5594 size_t count = node->node_count - 1;
5595
5596 mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5597 total -= count;
5598 }
5599 mt_free_one(ma_mnode_ptr(node));
5600 total--;
5601 }
5602
5603 mas->alloc = NULL;
5604 }
5605 EXPORT_SYMBOL_GPL(mas_destroy);
5606
5607 /*
5608 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5609 * @mas: The maple state
5610 * @nr_entries: The number of expected entries.
5611 *
5612 * This will attempt to pre-allocate enough nodes to store the expected number
5613 * of entries. The allocations will occur using the bulk allocator interface
5614 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5615 * to ensure any unused nodes are freed.
5616 *
5617 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5618 */
5619 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5620 {
5621 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5622 struct maple_enode *enode = mas->node;
5623 int nr_nodes;
5624 int ret;
5625
5626 /*
5627 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5628 * forking a process and duplicating the VMAs from one tree to a new
5629 * tree. When such a situation arises, it is known that the new tree is
5630 * not going to be used until the entire tree is populated. For
5631 * performance reasons, it is best to use a bulk load with RCU disabled.
5632 * This allows for optimistic splitting that favours the left and reuse
5633 * of nodes during the operation.
5634 */
5635
5636 /* Optimize splitting for bulk insert in-order */
5637 mas->mas_flags |= MA_STATE_BULK;
5638
5639 /*
5640 * Avoid overflow, assume a gap between each entry and a trailing null.
5641 * If this is wrong, it just means allocation can happen during
5642 * insertion of entries.
5643 */
5644 nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5645 if (!mt_is_alloc(mas->tree))
5646 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5647
5648 /* Leaves; reduce slots to keep space for expansion */
5649 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5650 /* Internal nodes */
5651 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5652 /* Add working room for split (2 nodes) + new parents */
5653 mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
5654
5655 /* Detect if allocations run out */
5656 mas->mas_flags |= MA_STATE_PREALLOC;
5657
5658 if (!mas_is_err(mas))
5659 return 0;
5660
5661 ret = xa_err(mas->node);
5662 mas->node = enode;
5663 mas_destroy(mas);
5664 return ret;
5665
5666 }
5667 EXPORT_SYMBOL_GPL(mas_expected_entries);
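
/*
 * Bulk-load sketch (illustrative; "start", "end" and "item" are assumed
 * arrays of nr in-order, non-overlapping ranges):
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	unsigned long i;
 *
 *	if (mas_expected_entries(&mas, nr))
 *		return -ENOMEM;
 *	for (i = 0; i < nr; i++) {
 *		mas_set_range(&mas, start[i], end[i]);
 *		mas_store(&mas, item[i]);
 *	}
 *	mas_destroy(&mas);
 *
 * The final mas_destroy() both frees any unused preallocations and performs
 * the rebalance check described above.
 */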
5668
5669 static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
5670 void **entry)
5671 {
5672 bool was_none = mas_is_none(mas);
5673
5674 if (unlikely(mas->last >= max)) {
5675 mas->node = MAS_OVERFLOW;
5676 return true;
5677 }
5678
5679 if (mas_is_active(mas))
5680 return false;
5681
5682 if (mas_is_none(mas) || mas_is_paused(mas)) {
5683 mas->node = MAS_START;
5684 } else if (mas_is_overflow(mas)) {
5685 /* Overflowed before, but the max changed */
5686 mas->node = MAS_START;
5687 } else if (mas_is_underflow(mas)) {
5688 mas->node = MAS_START;
5689 *entry = mas_walk(mas);
5690 if (*entry)
5691 return true;
5692 }
5693
5694 if (mas_is_start(mas))
5695 *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5696
5697 if (mas_is_ptr(mas)) {
5698 *entry = NULL;
5699 if (was_none && mas->index == 0) {
5700 mas->index = mas->last = 0;
5701 return true;
5702 }
5703 mas->index = 1;
5704 mas->last = ULONG_MAX;
5705 mas->node = MAS_NONE;
5706 return true;
5707 }
5708
5709 if (mas_is_none(mas))
5710 return true;
5711
5712 return false;
5713 }
5714
5715 /**
5716 * mas_next() - Get the next entry.
5717 * @mas: The maple state
5718 * @max: The maximum index to check.
5719 *
5720 * Returns the next entry after @mas->index.
5721 * Must hold rcu_read_lock or the write lock.
5722 * Can return the zero entry.
5723 *
5724 * Return: The next entry or %NULL
5725 */
5726 void *mas_next(struct ma_state *mas, unsigned long max)
5727 {
5728 void *entry = NULL;
5729
5730 if (mas_next_setup(mas, max, &entry))
5731 return entry;
5732
5733 /* Retries on dead nodes handled by mas_next_slot */
5734 return mas_next_slot(mas, max, false, true);
5735 }
5736 EXPORT_SYMBOL_GPL(mas_next);
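
/*
 * Forward-iteration sketch using mas_next() (an editor's example; the tree
 * and the process() handler are assumptions):
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
 *		process(entry);
 *	rcu_read_unlock();
 */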
5737
5738 /**
5739 * mas_next_range() - Advance the maple state to the next range
5740 * @mas: The maple state
5741 * @max: The maximum index to check.
5742 *
5743 * Sets @mas->index and @mas->last to the range.
5744 * Must hold rcu_read_lock or the write lock.
5745 * Can return the zero entry.
5746 *
5747 * Return: The next entry or %NULL
5748 */
5749 void *mas_next_range(struct ma_state *mas, unsigned long max)
5750 {
5751 void *entry = NULL;
5752
5753 if (mas_next_setup(mas, max, &entry))
5754 return entry;
5755
5756 /* Retries on dead nodes handled by mas_next_slot */
5757 return mas_next_slot(mas, max, true, true);
5758 }
5759 EXPORT_SYMBOL_GPL(mas_next_range);
5760
5761 /**
5762 * mt_next() - get the next value in the maple tree
5763 * @mt: The maple tree
5764 * @index: The start index
5765 * @max: The maximum index to check
5766 *
5767 * Takes the RCU read lock internally to protect the search; the returned
5768 * pointer is no longer protected once the RCU read lock is dropped.
5769 * See also: Documentation/core-api/maple_tree.rst
5770 *
5771 * Return: The entry higher than @index or %NULL if nothing is found.
5772 */
5773 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5774 {
5775 void *entry = NULL;
5776 MA_STATE(mas, mt, index, index);
5777
5778 rcu_read_lock();
5779 entry = mas_next(&mas, max);
5780 rcu_read_unlock();
5781 return entry;
5782 }
5783 EXPORT_SYMBOL_GPL(mt_next);
5784
5785 static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
5786 void **entry)
5787 {
5788 if (unlikely(mas->index <= min)) {
5789 mas->node = MAS_UNDERFLOW;
5790 return true;
5791 }
5792
5793 if (mas_is_active(mas))
5794 return false;
5795
5796 if (mas_is_overflow(mas)) {
5797 mas->node = MAS_START;
5798 *entry = mas_walk(mas);
5799 if (*entry)
5800 return true;
5801 }
5802
5803 if (mas_is_none(mas) || mas_is_paused(mas)) {
5804 mas->node = MAS_START;
5805 } else if (mas_is_underflow(mas)) {
5806 /* underflowed before but the min changed */
5807 mas->node = MAS_START;
5808 }
5809
5810 if (mas_is_start(mas))
5811 mas_walk(mas);
5812
5813 if (unlikely(mas_is_ptr(mas))) {
5814 if (!mas->index)
5815 goto none;
5816 mas->index = mas->last = 0;
5817 *entry = mas_root(mas);
5818 return true;
5819 }
5820
5821 if (mas_is_none(mas)) {
5822 if (mas->index) {
5823 /* Walked to out-of-range pointer? */
5824 mas->index = mas->last = 0;
5825 mas->node = MAS_ROOT;
5826 *entry = mas_root(mas);
5827 return true;
5828 }
5829 return true;
5830 }
5831
5832 return false;
5833
5834 none:
5835 mas->node = MAS_NONE;
5836 return true;
5837 }
5838
5839 /**
5840 * mas_prev() - Get the previous entry
5841 * @mas: The maple state
5842 * @min: The minimum value to check.
5843 *
5844 * Must hold rcu_read_lock or the write lock.
5845 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on
5846 * nodes that are not searchable.
5847 *
5848 * Return: the previous value or %NULL.
5849 */
5850 void *mas_prev(struct ma_state *mas, unsigned long min)
5851 {
5852 void *entry = NULL;
5853
5854 if (mas_prev_setup(mas, min, &entry))
5855 return entry;
5856
5857 return mas_prev_slot(mas, min, false, true);
5858 }
5859 EXPORT_SYMBOL_GPL(mas_prev);
5860
5861 /**
5862 * mas_prev_range() - Advance to the previous range
5863 * @mas: The maple state
5864 * @min: The minimum value to check.
5865 *
5866 * Sets @mas->index and @mas->last to the range.
5867 * Must hold rcu_read_lock or the write lock.
5868 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on
5869 * nodes that are not searchable.
5870 *
5871 * Return: the previous value or %NULL.
5872 */
5873 void *mas_prev_range(struct ma_state *mas, unsigned long min)
5874 {
5875 void *entry = NULL;
5876
5877 if (mas_prev_setup(mas, min, &entry))
5878 return entry;
5879
5880 return mas_prev_slot(mas, min, true, true);
5881 }
5882 EXPORT_SYMBOL_GPL(mas_prev_range);
5883
5884 /**
5885 * mt_prev() - get the previous value in the maple tree
5886 * @mt: The maple tree
5887 * @index: The start index
5888 * @min: The minimum index to check
5889 *
5890 * Takes the RCU read lock internally to protect the search; the returned
5891 * pointer is no longer protected once the RCU read lock is dropped.
5892 * See also: Documentation/core-api/maple_tree.rst
5893 *
5894 * Return: The entry before @index or %NULL if nothing is found.
5895 */
5896 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5897 {
5898 void *entry = NULL;
5899 MA_STATE(mas, mt, index, index);
5900
5901 rcu_read_lock();
5902 entry = mas_prev(&mas, min);
5903 rcu_read_unlock();
5904 return entry;
5905 }
5906 EXPORT_SYMBOL_GPL(mt_prev);
5907
5908 /**
5909 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5910 * @mas: The maple state to pause
5911 *
5912 * Some users need to pause a walk and drop the lock they're holding in
5913 * order to yield to a higher priority thread or carry out an operation
5914 * on an entry. Those users should call this function before they drop
5915 * the lock. It resets the @mas to be suitable for the next iteration
5916 * of the loop after the user has reacquired the lock. If most entries
5917 * found during a walk require you to call mas_pause(), the mt_for_each()
5918 * iterator may be more appropriate.
5919 *
5920 */
5921 void mas_pause(struct ma_state *mas)
5922 {
5923 mas->node = MAS_PAUSE;
5924 }
5925 EXPORT_SYMBOL_GPL(mas_pause);
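
/*
 * Typical pause pattern, as a sketch (assuming a tree protected by the
 * internal lock and a long-running walk that must yield):
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	mas_lock(&mas);
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			mas_unlock(&mas);
 *			cond_resched();
 *			mas_lock(&mas);
 *		}
 *	}
 *	mas_unlock(&mas);
 */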
5926
5927 /**
5928 * mas_find_setup() - Internal function to set up mas_find*().
5929 * @mas: The maple state
5930 * @max: The maximum index
5931 * @entry: Pointer to the entry
5932 *
5933 * Return: True if entry is the answer, false otherwise.
5934 */
5935 static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
5936 void **entry)
5937 {
5938 if (mas_is_active(mas)) {
5939 if (mas->last < max)
5940 return false;
5941
5942 return true;
5943 }
5944
5945 if (mas_is_paused(mas)) {
5946 if (unlikely(mas->last >= max))
5947 return true;
5948
5949 mas->index = ++mas->last;
5950 mas->node = MAS_START;
5951 } else if (mas_is_none(mas)) {
5952 if (unlikely(mas->last >= max))
5953 return true;
5954
5955 mas->index = mas->last;
5956 mas->node = MAS_START;
5957 } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) {
5958 if (mas->index > max) {
5959 mas->node = MAS_OVERFLOW;
5960 return true;
5961 }
5962
5963 mas->node = MAS_START;
5964 }
5965
5966 if (mas_is_start(mas)) {
5967 /* First run or continue */
5968 if (mas->index > max)
5969 return true;
5970
5971 *entry = mas_walk(mas);
5972 if (*entry)
5973 return true;
5974
5975 }
5976
5977 if (unlikely(!mas_searchable(mas))) {
5978 if (unlikely(mas_is_ptr(mas)))
5979 goto ptr_out_of_range;
5980
5981 return true;
5982 }
5983
5984 if (mas->index == max)
5985 return true;
5986
5987 return false;
5988
5989 ptr_out_of_range:
5990 mas->node = MAS_NONE;
5991 mas->index = 1;
5992 mas->last = ULONG_MAX;
5993 return true;
5994 }
5995
5996 /**
5997 * mas_find() - On the first call, find the entry at or after mas->index up to
5998 * %max. Otherwise, find the entry after mas->index.
5999 * @mas: The maple state
6000 * @max: The maximum value to check.
6001 *
6002 * Must hold rcu_read_lock or the write lock.
6003 * If an entry exists, last and index are updated accordingly.
6004 * May set @mas->node to MAS_NONE.
6005 *
6006 * Return: The entry or %NULL.
6007 */
6008 void *mas_find(struct ma_state *mas, unsigned long max)
6009 {
6010 void *entry = NULL;
6011
6012 if (mas_find_setup(mas, max, &entry))
6013 return entry;
6014
6015 /* Retries on dead nodes handled by mas_next_slot */
6016 return mas_next_slot(mas, max, false, false);
6017 }
6018 EXPORT_SYMBOL_GPL(mas_find);
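
/*
 * mas_find() is what the mas_for_each() iterator is built on; a hedged
 * equivalent of that macro expansion:
 *
 *	while ((entry = mas_find(&mas, max)) != NULL)
 *		...
 */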
6019
6020 /**
6021 * mas_find_range() - On the first call, find the entry at or after
6022 * mas->index up to %max. Otherwise, advance to the next slot after mas->index.
6023 * @mas: The maple state
6024 * @max: The maximum value to check.
6025 *
6026 * Must hold rcu_read_lock or the write lock.
6027 * If an entry exists, last and index are updated accordingly.
6028 * May set @mas->node to MAS_NONE.
6029 *
6030 * Return: The entry or %NULL.
6031 */
6032 void *mas_find_range(struct ma_state *mas, unsigned long max)
6033 {
6034 void *entry = NULL;
6035
6036 if (mas_find_setup(mas, max, &entry))
6037 return entry;
6038
6039 /* Retries on dead nodes handled by mas_next_slot */
6040 return mas_next_slot(mas, max, true, false);
6041 }
6042 EXPORT_SYMBOL_GPL(mas_find_range);
6043
6044 /**
6045 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
6046 * @mas: The maple state
6047 * @min: The minimum index
6048 * @entry: Pointer to the entry
6049 *
6050 * Return: True if entry is the answer, false otherwise.
6051 */
6052 static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
6053 void **entry)
6054 {
6055 if (mas_is_active(mas)) {
6056 if (mas->index > min)
6057 return false;
6058
6059 return true;
6060 }
6061
6062 if (mas_is_paused(mas)) {
6063 if (unlikely(mas->index <= min)) {
6064 mas->node = MAS_NONE;
6065 return true;
6066 }
6067 mas->node = MAS_START;
6068 mas->last = --mas->index;
6069 } else if (mas_is_none(mas)) {
6070 if (mas->index <= min)
6071 goto none;
6072
6073 mas->last = mas->index;
6074 mas->node = MAS_START;
6075 } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) {
6076 if (mas->last <= min) {
6077 mas->node = MAS_UNDERFLOW;
6078 return true;
6079 }
6080
6081 mas->node = MAS_START;
6082 }
6083
6084 if (mas_is_start(mas)) {
6085 /* First run or continue */
6086 if (mas->index < min)
6087 return true;
6088
6089 *entry = mas_walk(mas);
6090 if (*entry)
6091 return true;
6092 }
6093
6094 if (unlikely(!mas_searchable(mas))) {
6095 if (mas_is_ptr(mas))
6096 goto none;
6097
6098 if (mas_is_none(mas)) {
6099 /*
6100 * Walked to the location, and there was nothing so the
6101 * previous location is 0.
6102 */
6103 mas->last = mas->index = 0;
6104 mas->node = MAS_ROOT;
6105 *entry = mas_root(mas);
6106 return true;
6107 }
6108 }
6109
6110 if (mas->index < min)
6111 return true;
6112
6113 return false;
6114
6115 none:
6116 mas->node = MAS_NONE;
6117 return true;
6118 }
6119
6120 /**
6121 * mas_find_rev() - On the first call, find the first non-null entry at or below
6122 * mas->index down to %min. Otherwise find the first non-null entry below
6123 * mas->index down to %min.
6124 * @mas: The maple state
6125 * @min: The minimum value to check.
6126 *
6127 * Must hold rcu_read_lock or the write lock.
6128 * If an entry exists, last and index are updated accordingly.
6129 * May set @mas->node to MAS_NONE.
6130 *
6131 * Return: The entry or %NULL.
6132 */
6133 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6134 {
6135 void *entry = NULL;
6136
6137 if (mas_find_rev_setup(mas, min, &entry))
6138 return entry;
6139
6140 /* Retries on dead nodes handled by mas_prev_slot */
6141 return mas_prev_slot(mas, min, false, false);
6142
6143 }
6144 EXPORT_SYMBOL_GPL(mas_find_rev);
6145
6146 /**
6147 * mas_find_range_rev() - On the first call, find the first non-null entry at or
6148 * below mas->index down to %min. Otherwise advance to the previous slot after
6149 * mas->index down to %min.
6150 * @mas: The maple state
6151 * @min: The minimum value to check.
6152 *
6153 * Must hold rcu_read_lock or the write lock.
6154 * If an entry exists, last and index are updated accordingly.
6155 * May set @mas->node to MAS_NONE.
6156 *
6157 * Return: The entry or %NULL.
6158 */
6159 void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
6160 {
6161 void *entry = NULL;
6162
6163 if (mas_find_rev_setup(mas, min, &entry))
6164 return entry;
6165
6166 /* Retries on dead nodes handled by mas_prev_slot */
6167 return mas_prev_slot(mas, min, true, false);
6168 }
6169 EXPORT_SYMBOL_GPL(mas_find_range_rev);
6170
6171 /**
6172 * mas_erase() - Find the range in which index resides and erase the entire
6173 * range.
6174 * @mas: The maple state
6175 *
6176 * Must hold the write lock.
6177 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6178 * erases that range.
6179 *
6180 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6181 */
6182 void *mas_erase(struct ma_state *mas)
6183 {
6184 void *entry;
6185 MA_WR_STATE(wr_mas, mas, NULL);
6186
6187 if (mas_is_none(mas) || mas_is_paused(mas))
6188 mas->node = MAS_START;
6189
6190 /* Retry unnecessary when holding the write lock. */
6191 entry = mas_state_walk(mas);
6192 if (!entry)
6193 return NULL;
6194
6195 write_retry:
6196 /* Must reset to ensure spanning writes of last slot are detected */
6197 mas_reset(mas);
6198 mas_wr_store_setup(&wr_mas);
6199 mas_wr_store_entry(&wr_mas);
6200 if (mas_nomem(mas, GFP_KERNEL))
6201 goto write_retry;
6202
6203 return entry;
6204 }
6205 EXPORT_SYMBOL_GPL(mas_erase);
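
/*
 * Erase sketch (illustrative): clearing whichever range contains "index"
 * while holding the tree lock.  "tree" and "index" are assumptions.
 *
 *	MA_STATE(mas, &tree, index, index);
 *	void *old;
 *
 *	mtree_lock(&tree);
 *	old = mas_erase(&mas);
 *	mtree_unlock(&tree);
 */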
6206
6207 /**
6208 * mas_nomem() - Check if there was an error allocating and do the allocation
6209 * if necessary. If there are allocations, then free them.
6210 * @mas: The maple state
6211 * @gfp: The GFP_FLAGS to use for allocations
6212 * Return: true on allocation, false otherwise.
6213 */
6214 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6215 __must_hold(mas->tree->ma_lock)
6216 {
6217 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6218 mas_destroy(mas);
6219 return false;
6220 }
6221
6222 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6223 mtree_unlock(mas->tree);
6224 mas_alloc_nodes(mas, gfp);
6225 mtree_lock(mas->tree);
6226 } else {
6227 mas_alloc_nodes(mas, gfp);
6228 }
6229
6230 if (!mas_allocated(mas))
6231 return false;
6232
6233 mas->node = MAS_START;
6234 return true;
6235 }
6236
6237 void __init maple_tree_init(void)
6238 {
6239 maple_node_cache = kmem_cache_create("maple_node",
6240 sizeof(struct maple_node), sizeof(struct maple_node),
6241 SLAB_PANIC, NULL);
6242 }
6243
6244 /**
6245 * mtree_load() - Load a value stored in a maple tree
6246 * @mt: The maple tree
6247 * @index: The index to load
6248 *
6249 * Return: the entry or %NULL
6250 */
6251 void *mtree_load(struct maple_tree *mt, unsigned long index)
6252 {
6253 MA_STATE(mas, mt, index, index);
6254 void *entry;
6255
6256 trace_ma_read(__func__, &mas);
6257 rcu_read_lock();
6258 retry:
6259 entry = mas_start(&mas);
6260 if (unlikely(mas_is_none(&mas)))
6261 goto unlock;
6262
6263 if (unlikely(mas_is_ptr(&mas))) {
6264 if (index)
6265 entry = NULL;
6266
6267 goto unlock;
6268 }
6269
6270 entry = mtree_lookup_walk(&mas);
6271 if (!entry && unlikely(mas_is_start(&mas)))
6272 goto retry;
6273 unlock:
6274 rcu_read_unlock();
6275 if (xa_is_zero(entry))
6276 return NULL;
6277
6278 return entry;
6279 }
6280 EXPORT_SYMBOL(mtree_load);
6281
6282 /**
6283 * mtree_store_range() - Store an entry at a given range.
6284 * @mt: The maple tree
6285 * @index: The start of the range
6286 * @last: The end of the range
6287 * @entry: The entry to store
6288 * @gfp: The GFP_FLAGS to use for allocations
6289 *
6290 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6291 * be allocated.
6292 */
6293 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6294 unsigned long last, void *entry, gfp_t gfp)
6295 {
6296 MA_STATE(mas, mt, index, last);
6297 MA_WR_STATE(wr_mas, &mas, entry);
6298
6299 trace_ma_write(__func__, &mas, 0, entry);
6300 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6301 return -EINVAL;
6302
6303 if (index > last)
6304 return -EINVAL;
6305
6306 mtree_lock(mt);
6307 retry:
6308 mas_wr_store_entry(&wr_mas);
6309 if (mas_nomem(&mas, gfp))
6310 goto retry;
6311
6312 mtree_unlock(mt);
6313 if (mas_is_err(&mas))
6314 return xa_err(mas.node);
6315
6316 return 0;
6317 }
6318 EXPORT_SYMBOL(mtree_store_range);
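
/*
 * A minimal sketch (names and range assumed): reserving one range with a
 * single call rather than storing each index separately.
 *
 *	err = mtree_store_range(&tree, 0x1000, 0x1fff, entry, GFP_KERNEL);
 *	if (err)
 *		return err;
 */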
6319
6320 /**
6321 * mtree_store() - Store an entry at a given index.
6322 * @mt: The maple tree
6323 * @index: The index to store the value
6324 * @entry: The entry to store
6325 * @gfp: The GFP_FLAGS to use for allocations
6326 *
6327 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6328 * be allocated.
6329 */
6330 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6331 gfp_t gfp)
6332 {
6333 return mtree_store_range(mt, index, index, entry, gfp);
6334 }
6335 EXPORT_SYMBOL(mtree_store);
6336
6337 /**
6338 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6339 * @mt: The maple tree
6340 * @first: The start of the range
6341 * @last: The end of the range
6342 * @entry: The entry to store
6343 * @gfp: The GFP_FLAGS to use for allocations.
6344 *
6345 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6346 * request, -ENOMEM if memory could not be allocated.
6347 */
6348 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6349 unsigned long last, void *entry, gfp_t gfp)
6350 {
6351 MA_STATE(ms, mt, first, last);
6352
6353 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6354 return -EINVAL;
6355
6356 if (first > last)
6357 return -EINVAL;
6358
6359 mtree_lock(mt);
6360 retry:
6361 mas_insert(&ms, entry);
6362 if (mas_nomem(&ms, gfp))
6363 goto retry;
6364
6365 mtree_unlock(mt);
6366 if (mas_is_err(&ms))
6367 return xa_err(ms.node);
6368
6369 return 0;
6370 }
6371 EXPORT_SYMBOL(mtree_insert_range);
6372
6373 /**
6374 * mtree_insert() - Insert an entry at a given index if there is no value.
6375 * @mt: The maple tree
6376 * @index : The index to store the value
6377 * @entry: The entry to store
6378 * @gfp: The GFP_FLAGS to use for allocations.
6379 *
6380 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6381 * request, -ENOMEM if memory could not be allocated.
6382 */
6383 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6384 gfp_t gfp)
6385 {
6386 return mtree_insert_range(mt, index, index, entry, gfp);
6387 }
6388 EXPORT_SYMBOL(mtree_insert);
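
/*
 * Insert differs from store only in refusing to overwrite; a hedged example
 * with assumed names:
 *
 *	err = mtree_insert(&tree, 5, entry, GFP_KERNEL);
 *	if (err == -EEXIST)
 *		...	index 5 already held an entry	...
 */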
6389
6390 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6391 void *entry, unsigned long size, unsigned long min,
6392 unsigned long max, gfp_t gfp)
6393 {
6394 int ret = 0;
6395
6396 MA_STATE(mas, mt, 0, 0);
6397 if (!mt_is_alloc(mt))
6398 return -EINVAL;
6399
6400 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6401 return -EINVAL;
6402
6403 mtree_lock(mt);
6404 retry:
6405 ret = mas_empty_area(&mas, min, max, size);
6406 if (ret)
6407 goto unlock;
6408
6409 mas_insert(&mas, entry);
6410 /*
6411 * mas_nomem() may release the lock, causing the allocated area
6412 * to be unavailable, so try to allocate a free area again.
6413 */
6414 if (mas_nomem(&mas, gfp))
6415 goto retry;
6416
6417 if (mas_is_err(&mas))
6418 ret = xa_err(mas.node);
6419 else
6420 *startp = mas.index;
6421
6422 unlock:
6423 mtree_unlock(mt);
6424 return ret;
6425 }
6426 EXPORT_SYMBOL(mtree_alloc_range);
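
/*
 * Gap-allocation sketch (requires a tree created with MT_FLAGS_ALLOC_RANGE;
 * the size and bounds below are assumptions):
 *
 *	unsigned long start;
 *	int err;
 *
 *	err = mtree_alloc_range(&tree, &start, entry, 16, 0, 1000, GFP_KERNEL);
 *	if (!err)
 *		...	*startp holds the start of the reserved area	...
 */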
6427
6428 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6429 void *entry, unsigned long size, unsigned long min,
6430 unsigned long max, gfp_t gfp)
6431 {
6432 int ret = 0;
6433
6434 MA_STATE(mas, mt, 0, 0);
6435 if (!mt_is_alloc(mt))
6436 return -EINVAL;
6437
6438 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6439 return -EINVAL;
6440
6441 mtree_lock(mt);
6442 retry:
6443 ret = mas_empty_area_rev(&mas, min, max, size);
6444 if (ret)
6445 goto unlock;
6446
6447 mas_insert(&mas, entry);
6448 /*
6449 * mas_nomem() may release the lock, causing the allocated area
6450 * to be unavailable, so try to allocate a free area again.
6451 */
6452 if (mas_nomem(&mas, gfp))
6453 goto retry;
6454
6455 if (mas_is_err(&mas))
6456 ret = xa_err(mas.node);
6457 else
6458 *startp = mas.index;
6459
6460 unlock:
6461 mtree_unlock(mt);
6462 return ret;
6463 }
6464 EXPORT_SYMBOL(mtree_alloc_rrange);
6465
6466 /**
6467 * mtree_erase() - Find an index and erase the entire range.
6468 * @mt: The maple tree
6469 * @index: The index to erase
6470 *
6471 * Erasing is the same as a walk to an entry then a store of a NULL to that
6472 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6473 *
6474 * Return: The entry stored at the @index or %NULL
6475 */
6476 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6477 {
6478 void *entry = NULL;
6479
6480 MA_STATE(mas, mt, index, index);
6481 trace_ma_op(__func__, &mas);
6482
6483 mtree_lock(mt);
6484 entry = mas_erase(&mas);
6485 mtree_unlock(mt);
6486
6487 return entry;
6488 }
6489 EXPORT_SYMBOL(mtree_erase);
6490
6491 /*
6492 * mas_dup_free() - Free an incomplete duplication of a tree.
6493 * @mas: The maple state of an incomplete tree.
6494 *
6495 * The parameter @mas->node passed in indicates that the allocation failed on
6496 * this node. This function frees all nodes starting from @mas->node in the
6497 * reverse order of mas_dup_build(). There is no need to hold the source tree
6498 * lock at this time.
6499 */
6500 static void mas_dup_free(struct ma_state *mas)
6501 {
6502 struct maple_node *node;
6503 enum maple_type type;
6504 void __rcu **slots;
6505 unsigned char count, i;
6506
6507 /* Maybe the first node allocation failed. */
6508 if (mas_is_none(mas))
6509 return;
6510
6511 while (!mte_is_root(mas->node)) {
6512 mas_ascend(mas);
6513 if (mas->offset) {
6514 mas->offset--;
6515 do {
6516 mas_descend(mas);
6517 mas->offset = mas_data_end(mas);
6518 } while (!mte_is_leaf(mas->node));
6519
6520 mas_ascend(mas);
6521 }
6522
6523 node = mte_to_node(mas->node);
6524 type = mte_node_type(mas->node);
6525 slots = ma_slots(node, type);
6526 count = mas_data_end(mas) + 1;
6527 for (i = 0; i < count; i++)
6528 ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
6529 mt_free_bulk(count, slots);
6530 }
6531
6532 node = mte_to_node(mas->node);
6533 mt_free_one(node);
6534 }
6535
6536 /*
6537 * mas_copy_node() - Copy a maple node and replace the parent.
6538 * @mas: The maple state of source tree.
6539 * @new_mas: The maple state of new tree.
6540 * @parent: The parent of the new node.
6541 *
6542 * Copy @mas->node to @new_mas->node, set @parent to be the parent of
6543 * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM.
6544 */
6545 static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
6546 struct maple_pnode *parent)
6547 {
6548 struct maple_node *node = mte_to_node(mas->node);
6549 struct maple_node *new_node = mte_to_node(new_mas->node);
6550 unsigned long val;
6551
6552 /* Copy the node completely. */
6553 memcpy(new_node, node, sizeof(struct maple_node));
6554 /* Update the parent node pointer. */
6555 val = (unsigned long)node->parent & MAPLE_NODE_MASK;
6556 new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
6557 }
6558
6559 /*
6560 * mas_dup_alloc() - Allocate child nodes for a maple node.
6561 * @mas: The maple state of source tree.
6562 * @new_mas: The maple state of new tree.
6563 * @gfp: The GFP_FLAGS to use for allocations.
6564 *
6565 * This function allocates child nodes for @new_mas->node during the duplication
6566 * process. If memory allocation fails, @mas is set to -ENOMEM.
6567 */
6568 static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
6569 gfp_t gfp)
6570 {
6571 struct maple_node *node = mte_to_node(mas->node);
6572 struct maple_node *new_node = mte_to_node(new_mas->node);
6573 enum maple_type type;
6574 unsigned char request, count, i;
6575 void __rcu **slots;
6576 void __rcu **new_slots;
6577 unsigned long val;
6578
6579 /* Allocate memory for child nodes. */
6580 type = mte_node_type(mas->node);
6581 new_slots = ma_slots(new_node, type);
6582 request = mas_data_end(mas) + 1;
6583 count = mt_alloc_bulk(gfp, request, (void **)new_slots);
6584 if (unlikely(count < request)) {
6585 memset(new_slots, 0, request * sizeof(void *));
6586 mas_set_err(mas, -ENOMEM);
6587 return;
6588 }
6589
6590 /* Restore node type information in slots. */
6591 slots = ma_slots(node, type);
6592 for (i = 0; i < count; i++) {
6593 val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
6594 val &= MAPLE_NODE_MASK;
6595 ((unsigned long *)new_slots)[i] |= val;
6596 }
6597 }
6598
6599 /*
6600 * mas_dup_build() - Build a new maple tree from a source tree
6601 * @mas: The maple state of the source tree, which needs to be in MAS_START state.
6602 * @new_mas: The maple state of the new tree, which needs to be in MAS_START state.
6603 * @gfp: The GFP_FLAGS to use for allocations.
6604 *
6605 * This function builds a new tree in DFS preorder. If the memory allocation
6606 * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
6607 * last node. mas_dup_free() will free the incomplete duplication of a tree.
6608 *
6609 * Note that the attributes of the two trees need to be exactly the same, and the
6610 * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
6611 */
6612 static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
6613 gfp_t gfp)
6614 {
6615 struct maple_node *node;
6616 struct maple_pnode *parent = NULL;
6617 struct maple_enode *root;
6618 enum maple_type type;
6619
6620 if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
6621 unlikely(!mtree_empty(new_mas->tree))) {
6622 mas_set_err(mas, -EINVAL);
6623 return;
6624 }
6625
6626 root = mas_start(mas);
6627 if (mas_is_ptr(mas) || mas_is_none(mas))
6628 goto set_new_tree;
6629
6630 node = mt_alloc_one(gfp);
6631 if (!node) {
6632 new_mas->node = MAS_NONE;
6633 mas_set_err(mas, -ENOMEM);
6634 return;
6635 }
6636
6637 type = mte_node_type(mas->node);
6638 root = mt_mk_node(node, type);
6639 new_mas->node = root;
6640 new_mas->min = 0;
6641 new_mas->max = ULONG_MAX;
6642 root = mte_mk_root(root);
6643 while (1) {
6644 mas_copy_node(mas, new_mas, parent);
6645 if (!mte_is_leaf(mas->node)) {
6646 /* Only allocate child nodes for non-leaf nodes. */
6647 mas_dup_alloc(mas, new_mas, gfp);
6648 if (unlikely(mas_is_err(mas)))
6649 return;
6650 } else {
6651 /*
6652 * This is the last leaf node and duplication is
6653 * completed.
6654 */
6655 if (mas->max == ULONG_MAX)
6656 goto done;
6657
6658 /* This is not the last leaf node and needs to go up. */
6659 do {
6660 mas_ascend(mas);
6661 mas_ascend(new_mas);
6662 } while (mas->offset == mas_data_end(mas));
6663
6664 /* Move to the next subtree. */
6665 mas->offset++;
6666 new_mas->offset++;
6667 }
6668
6669 mas_descend(mas);
6670 parent = ma_parent_ptr(mte_to_node(new_mas->node));
6671 mas_descend(new_mas);
6672 mas->offset = 0;
6673 new_mas->offset = 0;
6674 }
6675 done:
6676 /* Specially handle the parent of the root node. */
6677 mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
6678 set_new_tree:
6679 /* Make them the same height */
6680 new_mas->tree->ma_flags = mas->tree->ma_flags;
6681 rcu_assign_pointer(new_mas->tree->ma_root, root);
6682 }
6683
6684 /**
6685 * __mt_dup(): Duplicate an entire maple tree
6686 * @mt: The source maple tree
6687 * @new: The new maple tree
6688 * @gfp: The GFP_FLAGS to use for allocations
6689 *
6690 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6691 * traversal. It uses memcpy() to copy nodes in the source tree and allocates
6692 * new child nodes in non-leaf nodes. The new node is exactly the same as the
6693 * source node except for all the addresses stored in it. This is faster than
6694 * traversing all elements in the source tree and inserting them one by one into
6695 * the new tree.
6696 * The user needs to ensure that the attributes of the source tree and the new
6697 * tree are the same, and the new tree needs to be an empty tree, otherwise
6698 * -EINVAL will be returned.
6699 * Note that the user needs to manually lock the source tree and the new tree.
6700 *
6701 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
6702 * the attributes of the two trees are different or the new tree is not an empty
6703 * tree.
6704 */
6705 int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6706 {
6707 int ret = 0;
6708 MA_STATE(mas, mt, 0, 0);
6709 MA_STATE(new_mas, new, 0, 0);
6710
6711 mas_dup_build(&mas, &new_mas, gfp);
6712 if (unlikely(mas_is_err(&mas))) {
6713 ret = xa_err(mas.node);
6714 if (ret == -ENOMEM)
6715 mas_dup_free(&new_mas);
6716 }
6717
6718 return ret;
6719 }
6720 EXPORT_SYMBOL(__mt_dup);
6721
6722 /**
6723 * mtree_dup(): Duplicate an entire maple tree
6724 * @mt: The source maple tree
6725 * @new: The new maple tree
6726 * @gfp: The GFP_FLAGS to use for allocations
6727 *
6728 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6729 * traversal. It uses memcpy() to copy nodes in the source tree and allocates
6730 * new child nodes in non-leaf nodes. The new node is exactly the same as the
6731 * source node except for all the addresses stored in it. This is faster than
6732 * traversing all elements in the source tree and inserting them one by one into
6733 * the new tree.
6734 * The user needs to ensure that the attributes of the source tree and the new
6735 * tree are the same, and the new tree needs to be an empty tree, otherwise
6736 * -EINVAL will be returned.
6737 *
6738 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
6739 * the attributes of the two trees are different or the new tree is not an empty
6740 * tree.
6741 */
6742 int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6743 {
6744 int ret = 0;
6745 MA_STATE(mas, mt, 0, 0);
6746 MA_STATE(new_mas, new, 0, 0);
6747
6748 mas_lock(&new_mas);
6749 mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
6750 mas_dup_build(&mas, &new_mas, gfp);
6751 mas_unlock(&mas);
6752 if (unlikely(mas_is_err(&mas))) {
6753 ret = xa_err(mas.node);
6754 if (ret == -ENOMEM)
6755 mas_dup_free(&new_mas);
6756 }
6757
6758 mas_unlock(&new_mas);
6759 return ret;
6760 }
6761 EXPORT_SYMBOL(mtree_dup);
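
/*
 * Duplication sketch (illustrative), e.g. copying an entire tree before
 * populating a child, as fork() does for the VMA tree; "old_tree" and
 * "new_tree" are assumed:
 *
 *	err = mtree_dup(&old_tree, &new_tree, GFP_KERNEL);
 *	if (err)
 *		return err;
 */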
6762
6763 /**
6764 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6765 * @mt: The maple tree
6766 *
6767 * Note: Does not handle locking.
6768 */
6769 void __mt_destroy(struct maple_tree *mt)
6770 {
6771 void *root = mt_root_locked(mt);
6772
6773 rcu_assign_pointer(mt->ma_root, NULL);
6774 if (xa_is_node(root))
6775 mte_destroy_walk(root, mt);
6776
6777 mt->ma_flags = mt_attr(mt);
6778 }
6779 EXPORT_SYMBOL_GPL(__mt_destroy);
6780
6781 /**
6782 * mtree_destroy() - Destroy a maple tree
6783 * @mt: The maple tree
6784 *
6785 * Frees all resources used by the tree. Handles locking.
6786 */
6787 void mtree_destroy(struct maple_tree *mt)
6788 {
6789 mtree_lock(mt);
6790 __mt_destroy(mt);
6791 mtree_unlock(mt);
6792 }
6793 EXPORT_SYMBOL(mtree_destroy);
6794
6795 /**
6796 * mt_find() - Search from the start up until an entry is found.
6797 * @mt: The maple tree
6798 * @index: Pointer which contains the start location of the search
6799 * @max: The maximum value of the search range
6800 *
6801 * Takes the RCU read lock internally to protect the search; the returned
6802 * pointer is no longer protected once the RCU read lock is dropped.
6803 * See also: Documentation/core-api/maple_tree.rst
6804 *
6805 * If an entry is found, @index is updated to point to the next possible
6806 * entry, independent of whether the found entry occupies a single index
6807 * or a range of indices.
6808 *
6809 * Return: The entry at or after the @index or %NULL
6810 */
6811 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6812 {
6813 MA_STATE(mas, mt, *index, *index);
6814 void *entry;
6815 #ifdef CONFIG_DEBUG_MAPLE_TREE
6816 unsigned long copy = *index;
6817 #endif
6818
6819 trace_ma_read(__func__, &mas);
6820
6821 if ((*index) > max)
6822 return NULL;
6823
6824 rcu_read_lock();
6825 retry:
6826 entry = mas_state_walk(&mas);
6827 if (mas_is_start(&mas))
6828 goto retry;
6829
6830 if (unlikely(xa_is_zero(entry)))
6831 entry = NULL;
6832
6833 if (entry)
6834 goto unlock;
6835
6836 while (mas_searchable(&mas) && (mas.last < max)) {
6837 entry = mas_next_entry(&mas, max);
6838 if (likely(entry && !xa_is_zero(entry)))
6839 break;
6840 }
6841
6842 if (unlikely(xa_is_zero(entry)))
6843 entry = NULL;
6844 unlock:
6845 rcu_read_unlock();
6846 if (likely(entry)) {
6847 *index = mas.last + 1;
6848 #ifdef CONFIG_DEBUG_MAPLE_TREE
6849 if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
6850 pr_err("index not increased! %lx <= %lx\n",
6851 *index, copy);
6852 #endif
6853 }
6854
6855 return entry;
6856 }
6857 EXPORT_SYMBOL(mt_find);
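
/*
 * Because mt_find() advances *index past the returned range, it can back a
 * simple cursor loop; a hedged, simplified sketch (the mt_for_each() helper
 * additionally uses mt_find_after() to stop when @index wraps to 0):
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	while ((entry = mt_find(&tree, &index, ULONG_MAX)) != NULL)
 *		...
 */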
6858
6859 /**
6860 * mt_find_after() - Search from the start up until an entry is found.
6861 * @mt: The maple tree
6862 * @index: Pointer which contains the start location of the search
6863 * @max: The maximum value to check
6864 *
6865 * Same as mt_find() except that it checks @index for 0 before
6866 * searching. If @index == 0, the search is aborted. This covers a wrap
6867 * around of @index to 0 in an iterator loop.
6868 *
6869 * Return: The entry at or after the @index or %NULL
6870 */
6871 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6872 unsigned long max)
6873 {
6874 if (!(*index))
6875 return NULL;
6876
6877 return mt_find(mt, index, max);
6878 }
6879 EXPORT_SYMBOL(mt_find_after);
6880
6881 #ifdef CONFIG_DEBUG_MAPLE_TREE
6882 atomic_t maple_tree_tests_run;
6883 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6884 atomic_t maple_tree_tests_passed;
6885 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6886
6887 #ifndef __KERNEL__
6888 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6889 void mt_set_non_kernel(unsigned int val)
6890 {
6891 kmem_cache_set_non_kernel(maple_node_cache, val);
6892 }
6893
6894 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6895 unsigned long mt_get_alloc_size(void)
6896 {
6897 return kmem_cache_get_alloc(maple_node_cache);
6898 }
6899
6900 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6901 void mt_zero_nr_tallocated(void)
6902 {
6903 kmem_cache_zero_nr_tallocated(maple_node_cache);
6904 }
6905
6906 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6907 unsigned int mt_nr_tallocated(void)
6908 {
6909 return kmem_cache_nr_tallocated(maple_node_cache);
6910 }
6911
6912 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6913 unsigned int mt_nr_allocated(void)
6914 {
6915 return kmem_cache_nr_allocated(maple_node_cache);
6916 }
6917
6918 /*
6919 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6920 * @mas: The maple state
6921 * @index: The index to restore in @mas.
6922 *
6923 * Used in test code.
6924 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6925 */
6926 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6927 {
6928 if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6929 return 0;
6930
6931 if (likely(!mte_dead_node(mas->node)))
6932 return 0;
6933
6934 mas_rewalk(mas, index);
6935 return 1;
6936 }
6937
6938 void mt_cache_shrink(void)
6939 {
6940 }
6941 #else
6942 /*
6943 * mt_cache_shrink() - For testing, don't use this.
6944 *
6945 * Certain testcases can trigger an OOM when combined with other memory
6946 * debugging configuration options. This function is used to reduce the
6947 * possibility of an out of memory event due to kmem_cache objects remaining
6948 * around for longer than usual.
6949 */
6950 void mt_cache_shrink(void)
6951 {
6952 kmem_cache_shrink(maple_node_cache);
6953
6954 }
6955 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6956
6957 #endif /* not defined __KERNEL__ */
6958 /*
6959 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6960 * @mas: The maple state
6961 * @offset: The offset into the slot array to fetch.
6962 *
6963 * Return: The entry stored at @offset.
6964 */
6965 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6966 unsigned char offset)
6967 {
6968 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6969 offset);
6970 }
6971
6972 /* Depth first search, post-order */
6973 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6974 {
6975
6976 struct maple_enode *p = MAS_NONE, *mn = mas->node;
6977 unsigned long p_min, p_max;
6978
6979 mas_next_node(mas, mas_mn(mas), max);
6980 if (!mas_is_none(mas))
6981 return;
6982
6983 if (mte_is_root(mn))
6984 return;
6985
6986 mas->node = mn;
6987 mas_ascend(mas);
6988 do {
6989 p = mas->node;
6990 p_min = mas->min;
6991 p_max = mas->max;
6992 mas_prev_node(mas, 0);
6993 } while (!mas_is_none(mas));
6994
6995 mas->node = p;
6996 mas->max = p_max;
6997 mas->min = p_min;
6998 }
6999
7000 /* Tree validations */
7001 static void mt_dump_node(const struct maple_tree *mt, void *entry,
7002 unsigned long min, unsigned long max, unsigned int depth,
7003 enum mt_dump_format format);
7004 static void mt_dump_range(unsigned long min, unsigned long max,
7005 unsigned int depth, enum mt_dump_format format)
7006 {
7007 static const char spaces[] = " ";
7008
7009 switch(format) {
7010 case mt_dump_hex:
7011 if (min == max)
7012 pr_info("%.*s%lx: ", depth * 2, spaces, min);
7013 else
7014 pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
7015 break;
7016 default:
7017 case mt_dump_dec:
7018 if (min == max)
7019 pr_info("%.*s%lu: ", depth * 2, spaces, min);
7020 else
7021 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
7022 }
7023 }
7024
7025 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
7026 unsigned int depth, enum mt_dump_format format)
7027 {
7028 mt_dump_range(min, max, depth, format);
7029
7030 if (xa_is_value(entry))
7031 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
7032 xa_to_value(entry), entry);
7033 else if (xa_is_zero(entry))
7034 pr_cont("zero (%ld)\n", xa_to_internal(entry));
7035 else if (mt_is_reserved(entry))
7036 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
7037 else
7038 pr_cont("%p\n", entry);
7039 }
7040
7041 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
7042 unsigned long min, unsigned long max, unsigned int depth,
7043 enum mt_dump_format format)
7044 {
7045 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
7046 bool leaf = mte_is_leaf(entry);
7047 unsigned long first = min;
7048 int i;
7049
7050 pr_cont(" contents: ");
7051 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
7052 switch(format) {
7053 case mt_dump_hex:
7054 pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7055 break;
7056 default:
7057 case mt_dump_dec:
7058 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7059 }
7060 }
7061 pr_cont("%p\n", node->slot[i]);
7062 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
7063 unsigned long last = max;
7064
7065 if (i < (MAPLE_RANGE64_SLOTS - 1))
7066 last = node->pivot[i];
7067 else if (!node->slot[i] && max != mt_node_max(entry))
7068 break;
7069 if (last == 0 && i > 0)
7070 break;
7071 if (leaf)
7072 mt_dump_entry(mt_slot(mt, node->slot, i),
7073 first, last, depth + 1, format);
7074 else if (node->slot[i])
7075 mt_dump_node(mt, mt_slot(mt, node->slot, i),
7076 first, last, depth + 1, format);
7077
7078 if (last == max)
7079 break;
7080 if (last > max) {
7081 switch(format) {
7082 case mt_dump_hex:
7083 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
7084 node, last, max, i);
7085 break;
7086 default:
7087 case mt_dump_dec:
7088 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7089 node, last, max, i);
7090 }
7091 }
7092 first = last + 1;
7093 }
7094 }
7095
7096 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
7097 unsigned long min, unsigned long max, unsigned int depth,
7098 enum mt_dump_format format)
7099 {
7100 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
7101 bool leaf = mte_is_leaf(entry);
7102 unsigned long first = min;
7103 int i;
7104
7105 pr_cont(" contents: ");
7106 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7107 switch (format) {
7108 case mt_dump_hex:
7109 pr_cont("%lx ", node->gap[i]);
7110 break;
7111 default:
7112 case mt_dump_dec:
7113 pr_cont("%lu ", node->gap[i]);
7114 }
7115 }
7116 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
7117 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
7118 switch (format) {
7119 case mt_dump_hex:
7120 pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7121 break;
7122 default:
7123 case mt_dump_dec:
7124 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7125 }
7126 }
7127 pr_cont("%p\n", node->slot[i]);
7128 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7129 unsigned long last = max;
7130
7131 if (i < (MAPLE_ARANGE64_SLOTS - 1))
7132 last = node->pivot[i];
7133 else if (!node->slot[i])
7134 break;
7135 if (last == 0 && i > 0)
7136 break;
7137 if (leaf)
7138 mt_dump_entry(mt_slot(mt, node->slot, i),
7139 first, last, depth + 1, format);
7140 else if (node->slot[i])
7141 mt_dump_node(mt, mt_slot(mt, node->slot, i),
7142 first, last, depth + 1, format);
7143
7144 if (last == max)
7145 break;
7146 if (last > max) {
7147 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7148 node, last, max, i);
7149 break;
7150 }
7151 first = last + 1;
7152 }
7153 }
7154
7155 static void mt_dump_node(const struct maple_tree *mt, void *entry,
7156 unsigned long min, unsigned long max, unsigned int depth,
7157 enum mt_dump_format format)
7158 {
7159 struct maple_node *node = mte_to_node(entry);
7160 unsigned int type = mte_node_type(entry);
7161 unsigned int i;
7162
7163 mt_dump_range(min, max, depth, format);
7164
7165 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
7166 node ? node->parent : NULL);
7167 switch (type) {
7168 case maple_dense:
7169 pr_cont("\n");
7170 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
7171 if (min + i > max)
7172 pr_cont("OUT OF RANGE: ");
7173 mt_dump_entry(mt_slot(mt, node->slot, i),
7174 min + i, min + i, depth, format);
7175 }
7176 break;
7177 case maple_leaf_64:
7178 case maple_range_64:
7179 mt_dump_range64(mt, entry, min, max, depth, format);
7180 break;
7181 case maple_arange_64:
7182 mt_dump_arange64(mt, entry, min, max, depth, format);
7183 break;
7184
7185 default:
7186 pr_cont(" UNKNOWN TYPE\n");
7187 }
7188 }
7189
7190 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
7191 {
7192 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
7193
7194 pr_info("maple_tree(%p) flags %X, height %u root %p\n",
7195 mt, mt->ma_flags, mt_height(mt), entry);
7196 if (!xa_is_node(entry))
7197 mt_dump_entry(entry, 0, 0, 0, format);
7198 else if (entry)
7199 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
7200 }
7201 EXPORT_SYMBOL_GPL(mt_dump);
7202
7203 /*
7204 * Calculate the maximum gap in a node and check if that's what is reported in
7205 * the parent (unless root).
7206 */
7207 static void mas_validate_gaps(struct ma_state *mas)
7208 {
7209 struct maple_enode *mte = mas->node;
7210 struct maple_node *p_mn, *node = mte_to_node(mte);
7211 enum maple_type mt = mte_node_type(mas->node);
7212 unsigned long gap = 0, max_gap = 0;
7213 unsigned long p_end, p_start = mas->min;
7214 unsigned char p_slot, offset;
7215 unsigned long *gaps = NULL;
7216 unsigned long *pivots = ma_pivots(node, mt);
7217 unsigned int i;
7218
7219 if (ma_is_dense(mt)) {
7220 for (i = 0; i < mt_slot_count(mte); i++) {
7221 if (mas_get_slot(mas, i)) {
7222 if (gap > max_gap)
7223 max_gap = gap;
7224 gap = 0;
7225 continue;
7226 }
7227 gap++;
7228 }
7229 goto counted;
7230 }
7231
7232 gaps = ma_gaps(node, mt);
7233 for (i = 0; i < mt_slot_count(mte); i++) {
7234 p_end = mas_safe_pivot(mas, pivots, i, mt);
7235
7236 if (!gaps) {
7237 if (!mas_get_slot(mas, i))
7238 gap = p_end - p_start + 1;
7239 } else {
7240 void *entry = mas_get_slot(mas, i);
7241
7242 gap = gaps[i];
7243 MT_BUG_ON(mas->tree, !entry);
7244
7245 if (gap > p_end - p_start + 1) {
7246 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
7247 mas_mn(mas), i, gap, p_end, p_start,
7248 p_end - p_start + 1);
7249 MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
7250 }
7251 }
7252
7253 if (gap > max_gap)
7254 max_gap = gap;
7255
7256 p_start = p_end + 1;
7257 if (p_end >= mas->max)
7258 break;
7259 }
7260
7261 counted:
7262 if (mt == maple_arange_64) {
7263 offset = ma_meta_gap(node, mt);
7264 if (offset > i) {
7265 pr_err("gap offset %p[%u] is invalid\n", node, offset);
7266 MT_BUG_ON(mas->tree, 1);
7267 }
7268
7269 if (gaps[offset] != max_gap) {
7270 pr_err("gap %p[%u] is not the largest gap %lu\n",
7271 node, offset, max_gap);
7272 MT_BUG_ON(mas->tree, 1);
7273 }
7274
7275 MT_BUG_ON(mas->tree, !gaps);
7276 for (i++ ; i < mt_slot_count(mte); i++) {
7277 if (gaps[i] != 0) {
7278 pr_err("gap %p[%u] beyond node limit != 0\n",
7279 node, i);
7280 MT_BUG_ON(mas->tree, 1);
7281 }
7282 }
7283 }
7284
7285 if (mte_is_root(mte))
7286 return;
7287
7288 p_slot = mte_parent_slot(mas->node);
7289 p_mn = mte_parent(mte);
7290 MT_BUG_ON(mas->tree, max_gap > mas->max);
7291 if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
7292 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7293 mt_dump(mas->tree, mt_dump_hex);
7294 MT_BUG_ON(mas->tree, 1);
7295 }
7296 }
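
/*
 * Worked example (illustrative, not from the original source): consider a
 * leaf spanning [0, 100] with pivots {20, 50, 100}.  If slot 1 is empty, it
 * contributes a gap of 50 - 21 + 1 = 30 (the range [21, 50] is free).  If 30
 * is the node's largest gap, the parent's gap entry for this child must also
 * read 30, which is what the final check above enforces.
 */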
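/*
 * Check that this node is referenced by its parent at the expected slot and
 * that no other parent slot aliases the same node.
 */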
static void mas_validate_parent_slot(struct ma_state *mas)
{
	struct maple_node *parent;
	struct maple_enode *node;
	enum maple_type p_type;
	unsigned char p_slot;
	void __rcu **slots;
	int i;

	if (mte_is_root(mas->node))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_type = mas_parent_type(mas, mas->node);
	parent = mte_parent(mas->node);
	slots = ma_slots(parent, p_type);
	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);

	/* Check prev/next parent slot for duplicate node entry */

	for (i = 0; i < mt_slots[p_type]; i++) {
		node = mas_slot(mas, slots, i);
		if (i == p_slot) {
			if (node != mas->node)
				pr_err("parent %p[%u] does not have %p\n",
				       parent, i, mas_mn(mas));
			MT_BUG_ON(mas->tree, node != mas->node);
		} else if (node == mas->node) {
			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
			       mas_mn(mas), parent, i, p_slot);
			MT_BUG_ON(mas->tree, node == mas->node);
		}
	}
}
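/*
 * Check that every occupied slot of a non-leaf node holds a child whose
 * parent pointer and parent slot point back at this node.
 */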
static void mas_validate_child_slot(struct ma_state *mas)
{
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
	struct maple_enode *child;
	unsigned char i;

	if (mte_is_leaf(mas->node))
		return;

	for (i = 0; i < mt_slots[type]; i++) {
		child = mas_slot(mas, slots, i);

		if (!child) {
			pr_err("Non-leaf node lacks child at %p[%u]\n",
			       mas_mn(mas), i);
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent_slot(child) != i) {
			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
			       mas_mn(mas), i, mte_to_node(child),
			       mte_parent_slot(child));
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent(child) != mte_to_node(mas->node)) {
			pr_err("child %p has parent %p not %p\n",
			       mte_to_node(child), mte_parent(child),
			       mte_to_node(mas->node));
			MT_BUG_ON(mas->tree, 1);
		}

		if (i < mt_pivots[type] && pivots[i] == mas->max)
			break;
	}
}

/*
 * Validate that all pivots are within mas->min and mas->max, check that the
 * metadata end matches where the maximum ends, and ensure that no slots or
 * pivots are set beyond the end of the data.
 */
static void mas_validate_limits(struct ma_state *mas)
{
	int i;
	unsigned long prev_piv = 0;
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mas_mn(mas), type);

	for (i = 0; i < mt_slots[type]; i++) {
		unsigned long piv;

		piv = mas_safe_pivot(mas, pivots, i, type);

		if (!piv && (i != 0)) {
			pr_err("Missing node limit pivot at %p[%u]\n",
			       mas_mn(mas), i);
			MAS_WARN_ON(mas, 1);
		}

		if (prev_piv > piv) {
			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
			       mas_mn(mas), i, piv, prev_piv);
			MAS_WARN_ON(mas, piv < prev_piv);
		}

		if (piv < mas->min) {
			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
			       piv, mas->min);
			MAS_WARN_ON(mas, piv < mas->min);
		}
		if (piv > mas->max) {
			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
			       piv, mas->max);
			MAS_WARN_ON(mas, piv > mas->max);
		}
		prev_piv = piv;
		if (piv == mas->max)
			break;
	}

	if (mas_data_end(mas) != i) {
		pr_err("node %p: data_end %u != the last slot offset %u\n",
		       mas_mn(mas), mas_data_end(mas), i);
		MT_BUG_ON(mas->tree, 1);
	}

	for (i += 1; i < mt_slots[type]; i++) {
		void *entry = mas_slot(mas, slots, i);

		/* The last slot may be overlaid by the node metadata. */
		if (entry && (i != mt_slots[type] - 1)) {
			pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
			       i, entry);
			MT_BUG_ON(mas->tree, entry != NULL);
		}

		if (i < mt_pivots[type]) {
			unsigned long piv = pivots[i];

			if (!piv)
				continue;

			pr_err("%p[%u] should not have piv %lu\n",
			       mas_mn(mas), i, piv);
			MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
		}
	}
}
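
/*
 * Example (illustrative, not from the original source): a node covering
 * [10, 100] with two entries and pivots {40, 100} has data_end 1, so slots
 * 2 onwards must be empty (bar the metadata slot) and pivots 2 onwards must
 * be zero; the trailing loop above verifies exactly that.
 */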
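/*
 * Walk the leaves from left to right and check that no two NULL entries are
 * adjacent: contiguous NULL ranges are expected to be coalesced into a
 * single slot.
 */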
static void mt_validate_nulls(struct maple_tree *mt)
{
	void *entry, *last = (void *)1;
	unsigned char offset = 0;
	void __rcu **slots;
	MA_STATE(mas, mt, 0, 0);

	mas_start(&mas);
	if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
		return;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
	do {
		entry = mas_slot(&mas, slots, offset);
		if (!last && !entry) {
			pr_err("Sequential nulls end at %p[%u]\n",
			       mas_mn(&mas), offset);
		}
		MT_BUG_ON(mt, !last && !entry);
		last = entry;
		if (offset == mas_data_end(&mas)) {
			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
			if (mas_is_none(&mas))
				return;
			offset = 0;
			slots = ma_slots(mte_to_node(mas.node),
					 mte_node_type(mas.node));
		} else {
			offset++;
		}
	} while (!mas_is_none(&mas));
}

/*
 * Validate a maple tree by checking:
 * 1. The limits (pivots are within mas->min to mas->max)
 * 2. The gap is correctly set in the parents
 */
void mt_validate(struct maple_tree *mt)
{
	unsigned char end;

	MA_STATE(mas, mt, 0, 0);
	rcu_read_lock();
	mas_start(&mas);
	if (!mas_searchable(&mas))
		goto done;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	while (!mas_is_none(&mas)) {
		MAS_WARN_ON(&mas, mte_dead_node(mas.node));
		end = mas_data_end(&mas);
		if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
				(mas.max != ULONG_MAX))) {
			pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
		}

		mas_validate_parent_slot(&mas);
		mas_validate_limits(&mas);
		mas_validate_child_slot(&mas);
		if (mt_is_alloc(mt))
			mas_validate_gaps(&mas);
		mas_dfs_postorder(&mas, ULONG_MAX);
	}
	mt_validate_nulls(mt);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt_validate);
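
/*
 * Example (illustrative, not part of the original source): a test module
 * might verify the tree invariants after each mutation:
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_store_range(&tree, 5, 10, xa_mk_value(5), GFP_KERNEL);
 *	mt_validate(&tree);
 *	mtree_destroy(&tree);
 */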

void mas_dump(const struct ma_state *mas)
{
	pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
	if (mas_is_none(mas))
		pr_err("(MAS_NONE) ");
	else if (mas_is_ptr(mas))
		pr_err("(MAS_ROOT) ");
	else if (mas_is_start(mas))
		pr_err("(MAS_START) ");
	else if (mas_is_paused(mas))
		pr_err("(MAS_PAUSED) ");

	pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
	pr_err("     min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
	       mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
	if (mas->index > mas->last)
		pr_err("Check index & last\n");
}
EXPORT_SYMBOL_GPL(mas_dump);

void mas_wr_dump(const struct ma_wr_state *wr_mas)
{
	pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
	       wr_mas->node, wr_mas->r_min, wr_mas->r_max);
	pr_err("        type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
	       wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
	       wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);

#endif /* CONFIG_DEBUG_MAPLE_TREE */