| /kernel/linux/linux-6.6/arch/arm64/kvm/hyp/nvhe/ |
| D | page_alloc.c |
      25   * Order 2 1 0
      28   * __find_buddy_nocheck(pool, page 0, order 0) => page 1
      29   * __find_buddy_nocheck(pool, page 0, order 1) => page 2
      30   * __find_buddy_nocheck(pool, page 1, order 0) => page 0
      31   * __find_buddy_nocheck(pool, page 2, order 0) => page 3
      35   unsigned short order) in __find_buddy_nocheck() argument
      39   addr ^= (PAGE_SIZE << order); in __find_buddy_nocheck()
      54   unsigned short order) in __find_buddy_avail() argument
      56   struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order); in __find_buddy_avail()
      58   if (!buddy || buddy->order != order || buddy->refcount) in __find_buddy_avail()
      [all …]
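The `addr ^= (PAGE_SIZE << order)` hit above is the core of buddy lookup: flipping bit `PAGE_SHIFT + order` of an address toggles between a block and its buddy, which is exactly what the comment table (page 0 / order 0 => page 1, and so on) illustrates. Below is a minimal, self-contained sketch of that relation; `buddy_addr()` and the 4 KiB `PAGE_SIZE` are assumptions of the sketch, not the hyp allocator's actual helpers.

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12u
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Hypothetical helper: address of the buddy of an order-'order' block,
 * obtained by flipping bit (PAGE_SHIFT + order), i.e. the
 * addr ^= (PAGE_SIZE << order) idiom in the excerpt. */
static uint64_t buddy_addr(uint64_t addr, unsigned int order)
{
	return addr ^ ((uint64_t)PAGE_SIZE << order);
}

int main(void)
{
	/* Page 0: its order-0 buddy is page 1, its order-1 buddy is page 2. */
	assert(buddy_addr(0 * PAGE_SIZE, 0) == 1 * PAGE_SIZE);
	assert(buddy_addr(0 * PAGE_SIZE, 1) == 2 * PAGE_SIZE);
	/* The relation is symmetric: page 1's order-0 buddy is page 0. */
	assert(buddy_addr(1 * PAGE_SIZE, 0) == 0 * PAGE_SIZE);
	assert(buddy_addr(2 * PAGE_SIZE, 0) == 3 * PAGE_SIZE);
	return 0;
}
```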
|
| /kernel/linux/linux-6.6/scripts/atomic/ |
| D | gen-atomic-fallback.sh |
      8    #gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
      16   local order="$1"; shift
      28   #gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
      35   local order="$1"; shift
      37   local tmpl_order=${order#_}
      39   gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
      42   #gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
      49   local order="$1"; shift
      51   local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
      52   gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
      [all …]
|
| /kernel/linux/linux-6.6/include/trace/events/ |
| D | compaction.h |
      168  int order,
      172  TP_ARGS(order, gfp_mask, prio),
      175  __field(int, order)
      181  __entry->order = order;
      186  TP_printk("order=%d gfp_mask=%s priority=%d",
      187  __entry->order,
      195  int order,
      198  TP_ARGS(zone, order, ret),
      203  __field(int, order)
      210  __entry->order = order;
      [all …]
|
| D | vmscan.h |
      68   TP_PROTO(int nid, int zid, int order),
      70   TP_ARGS(nid, zid, order),
      75   __field( int, order )
      81   __entry->order = order;
      84   TP_printk("nid=%d order=%d",
      86   __entry->order)
      91   TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
      93   TP_ARGS(nid, zid, order, gfp_flags),
      98   __field( int, order )
      105  __entry->order = order;
      [all …]
|
| D | kmem.h |
      138  TP_PROTO(struct page *page, unsigned int order),
      140  TP_ARGS(page, order),
      144  __field( unsigned int, order )
      149  __entry->order = order;
      152  TP_printk("page=%p pfn=0x%lx order=%d",
      155  __entry->order)
      172  TP_printk("page=%p pfn=0x%lx order=0",
      179  TP_PROTO(struct page *page, unsigned int order,
      182  TP_ARGS(page, order, gfp_flags, migratetype),
      186  __field( unsigned int, order )
      [all …]
|
| /kernel/linux/linux-5.10/include/trace/events/ |
| D | compaction.h |
      173  int order,
      177  TP_ARGS(order, gfp_mask, prio),
      180  __field(int, order)
      186  __entry->order = order;
      191  TP_printk("order=%d gfp_mask=%s priority=%d",
      192  __entry->order,
      200  int order,
      203  TP_ARGS(zone, order, ret),
      208  __field(int, order)
      215  __entry->order = order;
      [all …]
|
| D | kmem.h |
      154  TP_PROTO(struct page *page, unsigned int order),
      156  TP_ARGS(page, order),
      160  __field( unsigned int, order )
      165  __entry->order = order;
      168  TP_printk("page=%p pfn=%lu order=%d",
      171  __entry->order)
      188  TP_printk("page=%p pfn=%lu order=0",
      195  TP_PROTO(struct page *page, unsigned int order,
      198  TP_ARGS(page, order, gfp_flags, migratetype),
      202  __field( unsigned int, order )
      [all …]
|
| D | vmscan.h |
      54   TP_PROTO(int nid, int zid, int order),
      56   TP_ARGS(nid, zid, order),
      61   __field( int, order )
      67   __entry->order = order;
      70   TP_printk("nid=%d order=%d",
      72   __entry->order)
      77   TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
      79   TP_ARGS(nid, zid, order, gfp_flags),
      84   __field( int, order )
      91   __entry->order = order;
      [all …]
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
| D | dr_buddy.c |
      29   /* Allocating max_order bitmaps, one for each order */ in mlx5dr_buddy_init()
      39   /* In the beginning, we have only one order that is available for in mlx5dr_buddy_init()
      75   unsigned int *order) in dr_buddy_find_free_seg() argument
      88   "ICM Buddy: failed finding free mem for order %d\n", in dr_buddy_find_free_seg()
      99   *order = order_iter; in dr_buddy_find_free_seg()
      106  * @order: Order of the buddy to update.
      110  * It uses the data structures of the buddy system in order to find the first
      111  * area of free place, starting from the current order till the maximum order
      120  unsigned int order, in mlx5dr_buddy_alloc_mem() argument
      126  err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter); in mlx5dr_buddy_alloc_mem()
      [all …]
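The dr_buddy comments above describe the allocation path: starting at the requested order, scan the per-order free bitmaps upward until a free segment is found. The sketch below shows only that search under assumed, simplified data structures (`toy_buddy`, `MAX_ORDER`, one 64-bit bitmap per order are all inventions of the sketch, not the mlx5 driver's layout); a real allocator would then split the higher-order block it found.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ORDER 8	/* assumption for the sketch */

/* Hypothetical simplified buddy state: one free-bitmap per order,
 * bit i set means segment i is free at that order. */
struct toy_buddy {
	uint64_t bitmap[MAX_ORDER + 1];
};

/* Find a free segment of at least 'order', scanning from the requested
 * order up to the maximum order. Fills *seg and *order_used on success. */
static bool toy_find_free_seg(const struct toy_buddy *b, unsigned int order,
			      unsigned int *seg, unsigned int *order_used)
{
	for (unsigned int o = order; o <= MAX_ORDER; o++) {
		if (b->bitmap[o]) {
			*seg = (unsigned int)__builtin_ctzll(b->bitmap[o]);
			*order_used = o;
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct toy_buddy b = { .bitmap = { 0 } };
	unsigned int seg, o;

	b.bitmap[3] = 1ULL << 5;	/* one order-3 segment (index 5) is free */
	if (toy_find_free_seg(&b, 1, &seg, &o))
		printf("order-1 request satisfied from seg=%u at order=%u\n", seg, o);
	return 0;
}
```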
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/ |
| D | i915_buddy.c |
      15   pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n", in __igt_dump_block()
      202  unsigned int order; in igt_check_mm() local
      218  order = i915_buddy_block_order(root); in igt_check_mm()
      221  if (order != mm->max_order) { in igt_check_mm()
      222  pr_err("max order root missing\n"); in igt_check_mm()
      242  block = list_first_entry_or_null(&mm->free_list[order], in igt_check_mm()
      246  pr_err("root mismatch at order=%u\n", order); in igt_check_mm()
      307  int *order; in igt_buddy_alloc_smoke() local
      320  order = i915_random_order(mm.max_order + 1, &prng); in igt_buddy_alloc_smoke()
      321  if (!order) in igt_buddy_alloc_smoke()
      [all …]
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/tests/ |
| D | drm_buddy_test.c |
      21   static inline u64 get_size(int order, u64 chunk_size) in get_size() argument
      23   return (1 << order) * chunk_size; in get_size()
      49   …kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=… in __dump_block()
      228  unsigned int order; in check_mm() local
      244  order = drm_buddy_block_order(root); in check_mm()
      247  if (order != mm->max_order) { in check_mm()
      248  kunit_err(test, "max order root missing\n"); in check_mm()
      268  block = list_first_entry_or_null(&mm->free_list[order], in check_mm()
      271  kunit_err(test, "root mismatch at order=%u\n", order); in check_mm()
      331  int order, top; in drm_test_buddy_alloc_pathological() local
      [all …]
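The `get_size()` hit above is the usual order-to-size relation: a block of order N spans 2^N minimum-size chunks. A tiny sketch of the same arithmetic follows; the name `block_size()` and the choice to shift the 64-bit `chunk_size` (which keeps the math in 64 bits for large orders) are the sketch's own, not the test's code.

```c
#include <assert.h>
#include <stdint.h>

/* Order-to-size helper for a buddy-style allocator: size = chunk_size * 2^order. */
static uint64_t block_size(unsigned int order, uint64_t chunk_size)
{
	return chunk_size << order;
}

int main(void)
{
	assert(block_size(0, 4096) == 4096);       /* order 0: one chunk */
	assert(block_size(4, 4096) == 16 * 4096);  /* order 4: 16 chunks */
	return 0;
}
```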
|
| /kernel/linux/linux-6.6/mm/ |
| D | page_alloc.c |
      233  static void __free_pages_ok(struct page *page, unsigned int order,
      310  static bool page_contains_unaccepted(struct page *page, unsigned int order);
      311  static void accept_page(struct page *page, unsigned int order);
      312  static bool cond_accept_memory(struct zone *zone, unsigned int order);
      337  _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
      339  return deferred_grow_zone(zone, order); in _deferred_grow_zone()
      526  static inline unsigned int order_to_pindex(int migratetype, int order) in order_to_pindex() argument
      531  if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex()
      532  VM_BUG_ON(order != pageblock_order); in order_to_pindex()
      539  VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex()
      [all …]
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/ttm/ |
| D | ttm_pool.c |
      54   * @vaddr: original vaddr return for the mapping and order in the lower bits
      78   /* Allocate pages of size 1 << order with the given gfp_flags */
      80   unsigned int order) in ttm_pool_alloc_page() argument
      87   /* Don't set the __GFP_COMP flag for higher order allocations. in ttm_pool_alloc_page()
      91   if (order) in ttm_pool_alloc_page()
      96   p = alloc_pages_node(pool->nid, gfp_flags, order); in ttm_pool_alloc_page()
      98   p->private = order; in ttm_pool_alloc_page()
      106  if (order) in ttm_pool_alloc_page()
      109  vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page()
      122  dma->vaddr = (unsigned long)vaddr | order; in ttm_pool_alloc_page()
      [all …]
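The `dma->vaddr = (unsigned long)vaddr | order` hit stores the allocation order in the low bits of a page-aligned mapping address, so a single word carries both values. A standalone sketch of that packing idiom follows; `pack_vaddr()`/`unpack_vaddr()`, the 4 KiB `PAGE_SHIFT`, and the asserted invariants are assumptions of the sketch rather than the TTM pool's API.

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12u
#define PAGE_MASK  (~((uintptr_t)(1u << PAGE_SHIFT) - 1))

/* Pack a small order value into the low bits of a page-aligned address. */
static uintptr_t pack_vaddr(void *vaddr, unsigned int order)
{
	assert(((uintptr_t)vaddr & ~PAGE_MASK) == 0);	/* must be page aligned */
	assert(order < (1u << PAGE_SHIFT));		/* order fits in the low bits */
	return (uintptr_t)vaddr | order;
}

/* Recover both the aligned address and the order from the packed word. */
static void *unpack_vaddr(uintptr_t packed, unsigned int *order)
{
	*order = (unsigned int)(packed & ~PAGE_MASK);
	return (void *)(packed & PAGE_MASK);
}

int main(void)
{
	unsigned int order;
	void *p = (void *)0x7f0000042000u;	/* pretend page-aligned mapping */
	uintptr_t packed = pack_vaddr(p, 3);

	assert(unpack_vaddr(packed, &order) == p && order == 3);
	return 0;
}
```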
|
| /kernel/linux/linux-6.6/lib/ |
| D | test_xarray.c |
      72   unsigned order, void *entry, gfp_t gfp) in xa_store_order() argument
      74   XA_STATE_ORDER(xas, xa, index, order); in xa_store_order()
      177  unsigned int order; in check_xa_mark_1() local
      207  for (order = 2; order < max_order; order++) { in check_xa_mark_1()
      208  unsigned long base = round_down(index, 1UL << order); in check_xa_mark_1()
      209  unsigned long next = base + (1UL << order); in check_xa_mark_1()
      217  xa_store_order(xa, index, order, xa_mk_index(index), in check_xa_mark_1()
      328  unsigned int order; in check_xa_shrink() local
      353  for (order = 0; order < max_order; order++) { in check_xa_shrink()
      354  unsigned long max = (1UL << order) - 1; in check_xa_shrink()
      [all …]
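The `base = round_down(index, 1UL << order)` / `next = base + (1UL << order)` lines above compute the index range covered by a multi-order entry: the entry is aligned down to a multiple of 2^order and spans 2^order consecutive indices. A plain-C sketch of that arithmetic, with no xarray dependency (the name `order_range()` is the sketch's own):

```c
#include <assert.h>

/* Range covered by a multi-order entry containing 'index' at 'order':
 * [base, base + (1 << order)), where base is index rounded down to a
 * multiple of 2^order, the same round_down()/next arithmetic as above. */
static void order_range(unsigned long index, unsigned int order,
			unsigned long *base, unsigned long *next)
{
	*base = index & ~((1UL << order) - 1);
	*next = *base + (1UL << order);
}

int main(void)
{
	unsigned long base, next;

	order_range(21, 3, &base, &next);	/* an order-3 entry covers 8 indices */
	assert(base == 16 && next == 24);
	return 0;
}
```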
|
| /kernel/linux/linux-6.6/kernel/bpf/ |
| D | cgroup_iter.c |
      13   * 1. Walk the descendants of a cgroup in pre-order.
      14   * 2. Walk the descendants of a cgroup in post-order.
      18   * For walking descendants, cgroup_iter can walk in either pre-order or
      19   * post-order. For walking ancestors, the iter walks up from a cgroup to
      40   * EOPNOTSUPP. In order to work around, the user may have to update their
      54   int order; member
      77   if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_start()
      79   else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_start()
      110  if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE) in cgroup_iter_seq_next()
      112  else if (p->order == BPF_CGROUP_ITER_DESCENDANTS_POST) in cgroup_iter_seq_next()
      [all …]
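The cgroup_iter comments above distinguish pre-order and post-order walks of a cgroup's descendants. To make the difference concrete, here is a small sketch on a toy first-child/next-sibling tree (the `struct node`, `walk_pre()`, and `walk_post()` names are the sketch's own, not the kernel's cgroup structures): pre-order visits a node before its children, post-order visits it only after all descendants.

```c
#include <stdio.h>

/* Toy n-ary tree node in first-child / next-sibling form. */
struct node {
	const char *name;
	struct node *child;	/* first child */
	struct node *sibling;	/* next sibling */
};

/* Pre-order: visit a node, then its descendants. */
static void walk_pre(const struct node *n)
{
	for (; n; n = n->sibling) {
		printf("%s ", n->name);
		walk_pre(n->child);
	}
}

/* Post-order: visit all descendants, then the node itself. */
static void walk_post(const struct node *n)
{
	for (; n; n = n->sibling) {
		walk_post(n->child);
		printf("%s ", n->name);
	}
}

int main(void)
{
	struct node c = { "child2", NULL, NULL };
	struct node b = { "child1", NULL, &c };
	struct node a = { "root", &b, NULL };

	walk_pre(&a);	/* prints: root child1 child2 */
	printf("\n");
	walk_post(&a);	/* prints: child1 child2 root */
	printf("\n");
	return 0;
}
```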
|
| /kernel/linux/linux-6.6/tools/testing/radix-tree/ |
| D | multiorder.c |
      3    * multiorder.c: Multi-order radix tree entry testing
      16   unsigned order) in item_insert_order() argument
      18   XA_STATE_ORDER(xas, xa, index, order); in item_insert_order()
      19   struct item *item = item_create(index, order); in item_insert_order()
      42   int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7}; in multiorder_iteration() local
      47   err = item_insert_order(xa, index[i], order[i]); in multiorder_iteration()
      53   if (j <= (index[i] | ((1 << order[i]) - 1))) in multiorder_iteration()
      58   int height = order[i] / XA_CHUNK_SHIFT; in multiorder_iteration()
      60   unsigned long mask = (1UL << order[i]) - 1; in multiorder_iteration()
      66   assert(item->order == order[i]); in multiorder_iteration()
      [all …]
|
| /kernel/liteos_a/kernel/base/vm/ |
| D | los_vm_phys.c |
      160  STATIC VOID OsVmPhysFreeListAddUnsafe(LosVmPage *page, UINT8 order) in OsVmPhysFreeListAddUnsafe() argument
      169  page->order = order; in OsVmPhysFreeListAddUnsafe()
      172  list = &seg->freeList[order]; in OsVmPhysFreeListAddUnsafe()
      182  if ((page->segID >= VM_PHYS_SEG_MAX) || (page->order >= VM_LIST_ORDER_MAX)) { in OsVmPhysFreeListDelUnsafe()
      183  LOS_Panic("The page segment id(%u) or order(%u) is invalid\n", page->segID, page->order); in OsVmPhysFreeListDelUnsafe()
      187  list = &seg->freeList[page->order]; in OsVmPhysFreeListDelUnsafe()
      190  page->order = VM_LIST_ORDER_MAX; in OsVmPhysFreeListDelUnsafe()
      195  UINT32 order; in OsVmPhysPagesSpiltUnsafe() local
      198  for (order = newOrder; order > oldOrder;) { in OsVmPhysPagesSpiltUnsafe()
      199  order--; in OsVmPhysPagesSpiltUnsafe()
      [all …]
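The split loop visible above (`for (order = newOrder; order > oldOrder;) { order--; ... }`) halves a high-order block step by step, returning the upper half to the free list one order lower each time. Below is a minimal sketch of that loop shape; `split_block()`, `free_list_add()`, and the fixed 4 KiB page size are assumptions of the sketch, whereas the LiteOS code links the buddy into `seg->freeList[order]`.

```c
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Hypothetical free-list hook for the sketch; a real allocator would
 * link the buddy into the per-order free list as the excerpt does. */
static void free_list_add(unsigned long addr, unsigned int order)
{
	printf("freed buddy at 0x%lx, order %u\n", addr, order);
}

/* Split an order-'new_order' block at 'addr' down to 'old_order':
 * at each step the block is halved and the upper half is returned to
 * the free list one order lower, the same loop shape as the excerpt. */
static void split_block(unsigned long addr, unsigned int old_order,
			unsigned int new_order)
{
	for (unsigned int order = new_order; order > old_order; ) {
		order--;
		free_list_add(addr + ((unsigned long)PAGE_SIZE << order), order);
	}
}

int main(void)
{
	/* Carve an order-0 page out of an order-3 block at 0x100000:
	 * buddies of order 2, 1 and 0 go back to the free lists. */
	split_block(0x100000, 0, 3);
	return 0;
}
```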
|
| /kernel/linux/linux-5.10/arch/arm/lib/ |
| D | lib1funcs.S |
      106  .macro ARM_DIV2_ORDER divisor, order argument
      110  clz \order, \divisor
      111  rsb \order, \order, #31
      117  movhs \order, #16
      118  movlo \order, #0
      122  addhs \order, \order, #8
      126  addhs \order, \order, #4
      129  addhi \order, \order, #3
      130  addls \order, \order, \divisor, lsr #1
      137  .macro ARM_MOD_BODY dividend, divisor, order, spare
      [all …]
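ARM_DIV2_ORDER computes the order (log2) of a power-of-two divisor: the fast path uses `clz` and subtracts from 31, and an older-architecture path narrows the result with a shift-based binary search. A C sketch of both ideas follows; the function names and the exact fallback steps are the sketch's own, written to mirror the spirit of the macro rather than reproduce it.

```c
#include <assert.h>
#include <stdint.h>

/* Order (log2) of a power-of-two divisor via count-leading-zeros,
 * mirroring the clz / rsb #31 path of ARM_DIV2_ORDER. */
static unsigned int div2_order_clz(uint32_t divisor)
{
	return 31u - (unsigned int)__builtin_clz(divisor);
}

/* Fallback without clz: a shift-based binary search over halves,
 * in the spirit of the conditional movhs/addhs sequence. */
static unsigned int div2_order_noclz(uint32_t divisor)
{
	unsigned int order = 0;

	if (divisor >= 1u << 16) { order += 16; divisor >>= 16; }
	if (divisor >= 1u << 8)  { order += 8;  divisor >>= 8; }
	if (divisor >= 1u << 4)  { order += 4;  divisor >>= 4; }
	if (divisor >= 1u << 2)  { order += 2;  divisor >>= 2; }
	if (divisor >= 1u << 1)  { order += 1; }
	return order;
}

int main(void)
{
	assert(div2_order_clz(1) == 0 && div2_order_noclz(1) == 0);
	assert(div2_order_clz(4096) == 12 && div2_order_noclz(4096) == 12);
	assert(div2_order_clz(1u << 31) == 31 && div2_order_noclz(1u << 31) == 31);
	return 0;
}
```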
|
| /kernel/linux/linux-6.6/arch/arm/lib/ |
| D | lib1funcs.S |
      106  .macro ARM_DIV2_ORDER divisor, order argument
      110  clz \order, \divisor
      111  rsb \order, \order, #31
      117  movhs \order, #16
      118  movlo \order, #0
      122  addhs \order, \order, #8
      126  addhs \order, \order, #4
      129  addhi \order, \order, #3
      130  addls \order, \order, \divisor, lsr #1
      137  .macro ARM_MOD_BODY dividend, divisor, order, spare
      [all …]
|
| /kernel/linux/linux-6.6/mm/kmsan/ |
| D | init.c |
      105  * by their order: when kmsan_memblock_free_pages() is called for the first
      106  * time with a certain order, it is reserved as a shadow block, for the second
      109  * after which held_back[order] can be used again.
      114  bool kmsan_memblock_free_pages(struct page *page, unsigned int order) in kmsan_memblock_free_pages() argument
      118  if (!held_back[order].shadow) { in kmsan_memblock_free_pages()
      119  held_back[order].shadow = page; in kmsan_memblock_free_pages()
      122  if (!held_back[order].origin) { in kmsan_memblock_free_pages()
      123  held_back[order].origin = page; in kmsan_memblock_free_pages()
      126  shadow = held_back[order].shadow; in kmsan_memblock_free_pages()
      127  origin = held_back[order].origin; in kmsan_memblock_free_pages()
      [all …]
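The comment above explains the pairing scheme: for each order, the first block freed to the function is held back as the shadow block, the second as the origin block, and only then does the slot become usable again. The sketch below models just that per-order pairing; `hold_for_metadata()`, its return-value convention, and `NR_ORDERS` are assumptions of the sketch, not KMSAN's actual interface.

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_ORDERS 11	/* assumption for the sketch */

/* Hypothetical per-order holding area, mirroring held_back[] above:
 * the first block seen at a given order becomes shadow, the second
 * becomes origin, and later blocks are not held. */
static struct held {
	void *shadow;
	void *origin;
} held_back[NR_ORDERS];

/* Returns true if the block was kept as metadata, false if the caller
 * may hand it back to the page allocator (sketch's own convention). */
static bool hold_for_metadata(void *block, unsigned int order)
{
	if (!held_back[order].shadow) {
		held_back[order].shadow = block;
		return true;
	}
	if (!held_back[order].origin) {
		held_back[order].origin = block;
		return true;
	}
	return false;
}

int main(void)
{
	int a, b, c;
	bool r1 = hold_for_metadata(&a, 3);	/* kept as the shadow block */
	bool r2 = hold_for_metadata(&b, 3);	/* kept as the origin block */
	bool r3 = hold_for_metadata(&c, 3);	/* order 3 already has both */

	printf("%d %d %d\n", r1, r2, r3);	/* prints: 1 1 0 */
	return 0;
}
```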
|
| /kernel/linux/linux-5.10/lib/ |
| D | test_xarray.c |
      72   unsigned order, void *entry, gfp_t gfp) in xa_store_order() argument
      74   XA_STATE_ORDER(xas, xa, index, order); in xa_store_order()
      177  unsigned int order; in check_xa_mark_1() local
      207  for (order = 2; order < max_order; order++) { in check_xa_mark_1()
      208  unsigned long base = round_down(index, 1UL << order); in check_xa_mark_1()
      209  unsigned long next = base + (1UL << order); in check_xa_mark_1()
      217  xa_store_order(xa, index, order, xa_mk_index(index), in check_xa_mark_1()
      328  unsigned int order; in check_xa_shrink() local
      353  for (order = 0; order < max_order; order++) { in check_xa_shrink()
      354  unsigned long max = (1UL << order) - 1; in check_xa_shrink()
      [all …]
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | compaction.h |
      61   * Number of free order-0 pages that should be available above given watermark
      65   static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument
      69   * free scanner may have up to 1 << order pages on its list and then in compact_gap()
      70   * try to split an (order - 1) free page. At that point, a gap of in compact_gap()
      71   * 1 << order might not be enough, so it's safer to require twice that in compact_gap()
      80   return 2UL << order; in compact_gap()
      91   extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
      92   extern int fragmentation_index(struct zone *zone, unsigned int order);
      94   unsigned int order, unsigned int alloc_flags,
      98   extern enum compact_result compaction_suitable(struct zone *zone, int order,
      [all …]
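The comment above justifies the `2UL << order` gap: the free scanner may already hold up to 2^order pages and still need to split another block, so twice the request size is required above the watermark. A tiny worked example of that arithmetic (the `gap_pages()` name and the printed table are the sketch's own):

```c
#include <stdio.h>

/* Same arithmetic as compact_gap() above: twice the request size in
 * order-0 pages, so the free scanner can hold 1 << order pages and
 * still split one more block. */
static unsigned long gap_pages(unsigned int order)
{
	return 2UL << order;
}

int main(void)
{
	/* e.g. an order-9 request (2 MiB with 4 KiB pages) wants a 1024-page gap */
	for (unsigned int order = 0; order <= 9; order += 3)
		printf("order %u -> gap of %lu free order-0 pages\n",
		       order, gap_pages(order));
	return 0;
}
```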
|
| /kernel/linux/linux-5.10/tools/testing/radix-tree/ |
| D | multiorder.c |
      3    * multiorder.c: Multi-order radix tree entry testing
      16   unsigned order) in item_insert_order() argument
      18   XA_STATE_ORDER(xas, xa, index, order); in item_insert_order()
      19   struct item *item = item_create(index, order); in item_insert_order()
      42   int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7}; in multiorder_iteration() local
      47   err = item_insert_order(xa, index[i], order[i]); in multiorder_iteration()
      53   if (j <= (index[i] | ((1 << order[i]) - 1))) in multiorder_iteration()
      58   int height = order[i] / XA_CHUNK_SHIFT; in multiorder_iteration()
      60   unsigned long mask = (1UL << order[i]) - 1; in multiorder_iteration()
      66   assert(item->order == order[i]); in multiorder_iteration()
      [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/lib/ |
| D | drm_random.c |
      15   void drm_random_reorder(unsigned int *order, unsigned int count, in drm_random_reorder() argument
      23   swap(order[i], order[j]);
      30   unsigned int *order, i; in drm_random_order() local
      32   order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); in drm_random_order()
      33   if (!order) in drm_random_order()
      34   return order; in drm_random_order()
      37   order[i] = i;
      39   drm_random_reorder(order, count, state);
      40   return order; in drm_random_order()
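drm_random_order() builds the identity permutation 0..count-1 and then shuffles it in place with drm_random_reorder(), a Fisher-Yates shuffle driven by a seeded prandom state. The userspace sketch below shows the same idea; `random_order()`, `shuffle()`, and the use of `rand()` in place of the kernel's prandom state are assumptions of the sketch.

```c
#include <stdio.h>
#include <stdlib.h>

static void swap_u32(unsigned int *a, unsigned int *b)
{
	unsigned int t = *a; *a = *b; *b = t;
}

/* Fisher-Yates shuffle: each element is swapped with a uniformly chosen
 * element at or below it. rand() stands in for a seeded PRNG state. */
static void shuffle(unsigned int *order, unsigned int count)
{
	if (count < 2)
		return;
	for (unsigned int i = count - 1; i > 0; i--) {
		unsigned int j = (unsigned int)rand() % (i + 1);
		swap_u32(&order[i], &order[j]);
	}
}

/* Build 0..count-1 and shuffle it, like drm_random_order() above. */
static unsigned int *random_order(unsigned int count)
{
	unsigned int *order = malloc(count * sizeof(*order));

	if (!order)
		return NULL;
	for (unsigned int i = 0; i < count; i++)
		order[i] = i;
	shuffle(order, count);
	return order;
}

int main(void)
{
	unsigned int *order = random_order(8);

	if (!order)
		return 1;
	for (unsigned int i = 0; i < 8; i++)
		printf("%u ", order[i]);	/* a random permutation of 0..7 */
	printf("\n");
	free(order);
	return 0;
}
```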
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/lib/ |
| D | drm_random.c |
      16   void drm_random_reorder(unsigned int *order, unsigned int count, in drm_random_reorder() argument
      24   swap(order[i], order[j]);
      31   unsigned int *order, i; in drm_random_order() local
      33   order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); in drm_random_order()
      34   if (!order) in drm_random_order()
      35   return order; in drm_random_order()
      38   order[i] = i;
      40   drm_random_reorder(order, count, state);
      41   return order; in drm_random_order()
|