
Searched refs:order (Results 1 – 25 of 1762) sorted by relevance


/kernel/linux/linux-5.10/include/trace/events/
compaction.h:173 int order,
177 TP_ARGS(order, gfp_mask, prio),
180 __field(int, order)
186 __entry->order = order;
192 __entry->order,
200 int order,
203 TP_ARGS(zone, order, ret),
208 __field(int, order)
215 __entry->order = order;
222 __entry->order,
[all …]
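The compaction.h fragments above are pieces of Linux TRACE_EVENT definitions: TP_PROTO/TP_ARGS declare the probe signature, __field() reserves a slot in the ring-buffer record, and TP_fast_assign() copies the order argument into it. A minimal sketch of the full pattern these fragments belong to; the event name and reduced field set here are illustrative, not the kernel's actual definition:

TRACE_EVENT(my_compact_try,

	TP_PROTO(int order, gfp_t gfp_mask),

	TP_ARGS(order, gfp_mask),

	TP_STRUCT__entry(
		__field(int, order)
		__field(gfp_t, gfp_mask)
	),

	TP_fast_assign(
		__entry->order = order;
		__entry->gfp_mask = gfp_mask;
	),

	TP_printk("order=%d gfp_mask=0x%x",
		  __entry->order, (unsigned int)__entry->gfp_mask)
);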
kmem.h:154 TP_PROTO(struct page *page, unsigned int order),
156 TP_ARGS(page, order),
160 __field( unsigned int, order )
165 __entry->order = order;
171 __entry->order)
195 TP_PROTO(struct page *page, unsigned int order,
198 TP_ARGS(page, order, gfp_flags, migratetype),
202 __field( unsigned int, order )
209 __entry->order = order;
217 __entry->order,
[all …]
vmscan.h:54 TP_PROTO(int nid, int zid, int order),
56 TP_ARGS(nid, zid, order),
61 __field( int, order )
67 __entry->order = order;
72 __entry->order)
77 TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
79 TP_ARGS(nid, zid, order, gfp_flags),
84 __field( int, order )
91 __entry->order = order;
97 __entry->order,
[all …]
oom.h:35 int order,
42 TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
47 __field( int, order)
58 __entry->order = order;
68 __entry->order,
157 TP_PROTO(int order,
164 TP_ARGS(order, priority, result, retries, max_retries, ret),
167 __field( int, order)
176 __entry->order = order;
185 __entry->order,
/kernel/linux/linux-5.10/include/linux/
compaction.h:65 static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument
80 return 2UL << order; in compact_gap()
91 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
92 extern int fragmentation_index(struct zone *zone, unsigned int order);
94 unsigned int order, unsigned int alloc_flags,
98 extern enum compact_result compaction_suitable(struct zone *zone, int order,
101 extern void defer_compaction(struct zone *zone, int order);
102 extern bool compaction_deferred(struct zone *zone, int order);
103 extern void compaction_defer_reset(struct zone *zone, int order,
105 extern bool compaction_restarting(struct zone *zone, int order);
[all …]
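compact_gap() above sizes the free-page headroom compaction wants before an order-order allocation: 2UL << order is twice the request, so an order-9 request (512 pages) asks for 1024 free pages. A userspace restatement of that arithmetic:

#include <stdio.h>

/* Same arithmetic as the kernel's compact_gap(): twice the request size. */
static unsigned long compact_gap(unsigned int order)
{
	return 2UL << order;
}

int main(void)
{
	printf("%lu\n", compact_gap(9));	/* order-9 request: prints 1024 */
	return 0;
}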
gfp.h:503 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument
506 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument
516 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
520 __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) in __alloc_pages() argument
522 return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL); in __alloc_pages()
530 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node() argument
535 return __alloc_pages(gfp_mask, order, nid); in __alloc_pages_node()
544 unsigned int order) in alloc_pages_node() argument
549 return __alloc_pages_node(nid, gfp_mask, order); in alloc_pages_node()
553 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
[all …]
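The gfp.h declarations above are the order-based page allocator entry points: callers pass an order and get back 1 << order physically contiguous pages. A minimal usage sketch (kernel-module context assumed; the helper names are mine, not kernel APIs):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative helpers: allocate and release an order-2 (4-page) block. */
static struct page *grab_block(void)
{
	return alloc_pages(GFP_KERNEL, 2);	/* NULL on failure */
}

static void drop_block(struct page *page)
{
	if (page)
		__free_pages(page, 2);	/* order must match the allocation */
}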
page_owner.h:11 extern void __reset_page_owner(struct page *page, unsigned int order);
13 unsigned int order, gfp_t gfp_mask);
21 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
24 __reset_page_owner(page, order); in reset_page_owner()
28 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
31 __set_page_owner(page, order, gfp_mask); in set_page_owner()
55 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
59 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
63 unsigned int order) in split_page_owner() argument
/kernel/linux/linux-5.10/drivers/gpu/drm/lib/
drm_random.c:15 void drm_random_reorder(unsigned int *order, unsigned int count, in drm_random_reorder() argument
23 swap(order[i], order[j]); in drm_random_reorder()
30 unsigned int *order, i; in drm_random_order() local
32 order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); in drm_random_order()
33 if (!order) in drm_random_order()
34 return order; in drm_random_order()
37 order[i] = i; in drm_random_order()
39 drm_random_reorder(order, count, state); in drm_random_order()
40 return order; in drm_random_order()
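drm_random_order() above builds the identity permutation 0..count-1 and drm_random_reorder() shuffles it with pairwise swaps, i.e. a Fisher-Yates shuffle. A self-contained userspace sketch of the same idea, with rand() standing in for the kernel PRNG state:

#include <stdlib.h>

static unsigned int *random_order(unsigned int count)
{
	unsigned int *order = malloc(count * sizeof(*order));
	unsigned int i;

	if (!order)
		return NULL;
	for (i = 0; i < count; i++)
		order[i] = i;			/* identity permutation */
	for (i = count; i > 1; i--) {		/* Fisher-Yates, back to front */
		unsigned int j = rand() % i;	/* 0 <= j <= i-1 */
		unsigned int tmp = order[i - 1];

		order[i - 1] = order[j];
		order[j] = tmp;
	}
	return order;
}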
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
i915_buddy.c:202 unsigned int order; in igt_check_mm() local
218 order = i915_buddy_block_order(root); in igt_check_mm()
221 if (order != mm->max_order) { in igt_check_mm()
242 block = list_first_entry_or_null(&mm->free_list[order], in igt_check_mm()
246 pr_err("root mismatch at order=%u\n", order); in igt_check_mm()
307 int *order; in igt_buddy_alloc_smoke() local
320 order = i915_random_order(mm.max_order + 1, &prng); in igt_buddy_alloc_smoke()
321 if (!order) in igt_buddy_alloc_smoke()
326 int max_order = order[i]; in igt_buddy_alloc_smoke()
329 int order; in igt_buddy_alloc_smoke() local
[all …]
i915_random.c:70 void i915_random_reorder(unsigned int *order, unsigned int count, in i915_random_reorder() argument
73 i915_prandom_shuffle(order, sizeof(*order), count, state); in i915_random_reorder()
78 unsigned int *order, i; in i915_random_order() local
80 order = kmalloc_array(count, sizeof(*order), in i915_random_order()
82 if (!order) in i915_random_order()
83 return order; in i915_random_order()
86 order[i] = i; in i915_random_order()
88 i915_random_reorder(order, count, state); in i915_random_order()
89 return order; in i915_random_order()
i915_syncmap.c:274 unsigned int pass, order; in igt_syncmap_join_above() local
296 for (order = 0; order < 64; order += SHIFT) { in igt_syncmap_join_above()
297 u64 context = BIT_ULL(order); in igt_syncmap_join_above()
335 unsigned int step, order, idx; in igt_syncmap_join_below() local
345 for (order = 64 - SHIFT; order > 0; order -= SHIFT) { in igt_syncmap_join_below()
346 u64 context = step * BIT_ULL(order); in igt_syncmap_join_below()
354 context, order, step, sync->height, sync->prefix); in igt_syncmap_join_below()
362 for (order = SHIFT; order < 64; order += SHIFT) { in igt_syncmap_join_below()
363 u64 context = step * BIT_ULL(order); in igt_syncmap_join_below()
367 context, order, step); in igt_syncmap_join_below()
[all …]
/kernel/linux/linux-5.10/drivers/media/pci/cx18/
cx18-mailbox.c:230 static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) in epu_dma_done() argument
239 mb = &order->mb; in epu_dma_done()
246 (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ? in epu_dma_done()
252 mdl_ack = order->mdl_ack; in epu_dma_done()
276 if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) && in epu_dma_done()
323 static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order) in epu_debug() argument
326 char *str = order->str; in epu_debug()
328 CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str); in epu_debug()
334 static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order) in epu_cmd() argument
336 switch (order->rpu) { in epu_cmd()
[all …]
/kernel/linux/linux-5.10/mm/
compaction.c:48 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
49 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
89 unsigned int i, order, nr_pages; in split_map_pages() local
96 order = page_private(page); in split_map_pages()
97 nr_pages = 1 << order; in split_map_pages()
99 post_alloc_hook(page, order, __GFP_MOVABLE); in split_map_pages()
100 if (order) in split_map_pages()
101 split_page(page, order); in split_map_pages()
160 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
165 if (order < zone->compact_order_failed) in defer_compaction()
[all …]
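The block_start_pfn()/block_end_pfn() macros above round a page frame number down and up to the enclosing 1 << order boundary. A worked example with round_down()/ALIGN() expanded by hand: for pfn 1000 and order 9 (512-page blocks) the block spans [512, 1024):

#include <stdio.h>

int main(void)
{
	unsigned long pfn = 1000, block = 1UL << 9;
	unsigned long start = pfn & ~(block - 1);		   /* round_down */
	unsigned long end = (pfn + 1 + block - 1) & ~(block - 1); /* ALIGN */

	printf("start=%lu end=%lu\n", start, end);	/* start=512 end=1024 */
	return 0;
}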
page_alloc.c:282 static void __free_pages_ok(struct page *page, unsigned int order,
419 static inline void kasan_free_nondeferred_pages(struct page *page, int order) in kasan_free_nondeferred_pages() argument
422 kasan_free_pages(page, order); in kasan_free_nondeferred_pages()
703 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
706 int nr_pages = 1 << order; in prep_compound_page()
717 set_compound_order(page, order); in prep_compound_page()
768 unsigned int order, int migratetype) in set_page_guard() argument
773 if (order >= debug_guardpage_minorder()) in set_page_guard()
778 set_page_private(page, order); in set_page_guard()
780 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
[all …]
/kernel/linux/linux-5.10/tools/testing/radix-tree/
multiorder.c:16 unsigned order) in item_insert_order() argument
18 XA_STATE_ORDER(xas, xa, index, order); in item_insert_order()
19 struct item *item = item_create(index, order); in item_insert_order()
42 int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7}; in multiorder_iteration() local
47 err = item_insert_order(xa, index[i], order[i]); in multiorder_iteration()
53 if (j <= (index[i] | ((1 << order[i]) - 1))) in multiorder_iteration()
58 int height = order[i] / XA_CHUNK_SHIFT; in multiorder_iteration()
60 unsigned long mask = (1UL << order[i]) - 1; in multiorder_iteration()
66 assert(item->order == order[i]); in multiorder_iteration()
82 int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7}; in multiorder_tagged_iteration() local
[all …]
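The mask test in multiorder.c above relies on an order-N entry covering the whole 2^N-aligned index range around its index. A worked example of that masking: an order-3 entry stored at index 10 covers indices 8 through 15:

#include <stdio.h>

int main(void)
{
	unsigned long index = 10, order = 3;
	unsigned long mask = (1UL << order) - 1;	/* 7 */

	printf("covers [%lu, %lu]\n", index & ~mask, index | mask);
	/* prints: covers [8, 15] */
	return 0;
}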
iteration_check.c:25 int order; in my_item_insert() local
29 for (order = max_order; order >= 0; order--) { in my_item_insert()
30 xas_set_order(&xas, index, order); in my_item_insert()
31 item->order = order; in my_item_insert()
41 if (order < 0) in my_item_insert()
165 void iteration_test(unsigned order, unsigned test_duration) in iteration_test() argument
170 order > 0 ? "multiorder " : "", test_duration); in iteration_test()
172 max_order = order; in iteration_test()
/kernel/linux/linux-5.10/Documentation/trace/postprocess/
trace-vmscan-postprocess.pl:317 my $order = $1;
318 $perprocesspid{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN_PERORDER}[$order]++;
319 $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER} = $order;
328 my $order = $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER};
330 $perprocesspid{$process_pid}->{HIGH_DIRECT_RECLAIM_LATENCY}[$index] = "$order-$latency";
341 my $order = $2;
342 $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER} = $order;
347 $perprocesspid{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE_PERORDER}[$order]++;
350 $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP_PERORDER}[$order]++;
360 my $order = $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER};
[all …]
/kernel/liteos_a/kernel/base/vm/
los_vm_phys.c:160 STATIC VOID OsVmPhysFreeListAddUnsafe(LosVmPage *page, UINT8 order) in OsVmPhysFreeListAddUnsafe() argument
169 page->order = order; in OsVmPhysFreeListAddUnsafe()
172 list = &seg->freeList[order]; in OsVmPhysFreeListAddUnsafe()
182 if ((page->segID >= VM_PHYS_SEG_MAX) || (page->order >= VM_LIST_ORDER_MAX)) { in OsVmPhysFreeListDelUnsafe()
183 LOS_Panic("The page segment id(%u) or order(%u) is invalid\n", page->segID, page->order); in OsVmPhysFreeListDelUnsafe()
187 list = &seg->freeList[page->order]; in OsVmPhysFreeListDelUnsafe()
190 page->order = VM_LIST_ORDER_MAX; in OsVmPhysFreeListDelUnsafe()
195 UINT32 order; in OsVmPhysPagesSpiltUnsafe() local
198 for (order = newOrder; order > oldOrder;) { in OsVmPhysPagesSpiltUnsafe()
199 order--; in OsVmPhysPagesSpiltUnsafe()
[all …]
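The loop in OsVmPhysPagesSpiltUnsafe() above walks a block from newOrder down to oldOrder, returning the upper buddy half to the free list at each step; because page descriptors sit in a contiguous array, that buddy is simply page + (1 << order). A sketch of the split under those assumptions (the types and the free_list_add() helper are stand-ins, not LiteOS APIs):

struct page_desc { unsigned int order; };	/* stand-in for LosVmPage */

static void free_list_add(struct page_desc *page, unsigned int order)
{
	page->order = order;	/* real code also links into freeList[order] */
}

static void buddy_split(struct page_desc *page, unsigned int old_order,
			unsigned int new_order)
{
	unsigned int order;

	for (order = new_order; order > old_order;) {
		order--;
		free_list_add(page + (1UL << order), order);	/* upper half */
	}
}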
/kernel/linux/linux-5.10/arch/s390/mm/
page-states.c:71 static inline void set_page_unused(struct page *page, int order) in set_page_unused() argument
75 for (i = 0; i < (1 << order); i++) in set_page_unused()
82 static inline void set_page_stable_dat(struct page *page, int order) in set_page_stable_dat() argument
86 for (i = 0; i < (1 << order); i++) in set_page_stable_dat()
93 static inline void set_page_stable_nodat(struct page *page, int order) in set_page_stable_nodat() argument
97 for (i = 0; i < (1 << order); i++) in set_page_stable_nodat()
208 void arch_free_page(struct page *page, int order) in arch_free_page() argument
212 set_page_unused(page, order); in arch_free_page()
215 void arch_alloc_page(struct page *page, int order) in arch_alloc_page() argument
220 set_page_stable_dat(page, order); in arch_alloc_page()
[all …]
/kernel/linux/linux-5.10/lib/
test_xarray.c:72 unsigned order, void *entry, gfp_t gfp) in xa_store_order() argument
74 XA_STATE_ORDER(xas, xa, index, order); in xa_store_order()
177 unsigned int order; in check_xa_mark_1() local
207 for (order = 2; order < max_order; order++) { in check_xa_mark_1()
208 unsigned long base = round_down(index, 1UL << order); in check_xa_mark_1()
209 unsigned long next = base + (1UL << order); in check_xa_mark_1()
217 xa_store_order(xa, index, order, xa_mk_index(index), in check_xa_mark_1()
328 unsigned int order; in check_xa_shrink() local
353 for (order = 0; order < max_order; order++) { in check_xa_shrink()
354 unsigned long max = (1UL << order) - 1; in check_xa_shrink()
[all …]
/kernel/linux/linux-5.10/arch/c6x/mm/
dma-coherent.c:43 static inline u32 __alloc_dma_pages(int order) in __alloc_dma_pages() argument
49 pos = bitmap_find_free_region(dma_bitmap, dma_pages, order); in __alloc_dma_pages()
55 static void __free_dma_pages(u32 addr, int order) in __free_dma_pages() argument
60 if (addr < dma_base || (pos + (1 << order)) >= dma_pages) { in __free_dma_pages()
66 bitmap_release_region(dma_bitmap, pos, order); in __free_dma_pages()
79 int order; in arch_dma_alloc() local
84 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); in arch_dma_alloc()
86 paddr = __alloc_dma_pages(order); in arch_dma_alloc()
95 memset(ret, 0, 1 << order); in arch_dma_alloc()
105 int order; in arch_dma_free() local
[all …]
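arch_dma_alloc() above derives a buddy order from a byte size: round up to whole pages, then take the ceiling log2. A worked userspace restatement, assuming 4 KiB pages; a 20 KiB request needs 5 pages and therefore an order-3 (8-page) region:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

/* Userspace stand-in for the kernel's get_count_order(): ceil(log2(n)). */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 20 * 1024;
	unsigned int pages = ((size - 1) >> PAGE_SHIFT) + 1;	/* 5 */

	printf("order=%d\n", count_order(pages));	/* order=3 */
	return 0;
}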
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/therm/
gk104.c:34 const struct gk104_clkgate_engine_info *order = therm->clkgate_order; in gk104_clkgate_enable() local
38 for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_enable()
39 if (!nvkm_device_subdev(dev, order[i].engine)) in gk104_clkgate_enable()
42 nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500); in gk104_clkgate_enable()
50 for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_enable()
51 if (!nvkm_device_subdev(dev, order[i].engine)) in gk104_clkgate_enable()
54 nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045); in gk104_clkgate_enable()
63 const struct gk104_clkgate_engine_info *order = therm->clkgate_order; in gk104_clkgate_fini() local
67 for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) { in gk104_clkgate_fini()
68 if (!nvkm_device_subdev(dev, order[i].engine)) in gk104_clkgate_fini()
[all …]
/kernel/linux/linux-5.10/arch/arm/lib/
lib1funcs.S:106 .macro ARM_DIV2_ORDER divisor, order argument
110 clz \order, \divisor
111 rsb \order, \order, #31
117 movhs \order, #16
118 movlo \order, #0
122 addhs \order, \order, #8
126 addhs \order, \order, #4
129 addhi \order, \order, #3
130 addls \order, \order, \divisor, lsr #1
137 .macro ARM_MOD_BODY dividend, divisor, order, spare
[all …]
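ARM_DIV2_ORDER above computes the shift amount for dividing by a power-of-two divisor: on cores with clz the order is 31 - clz(divisor), i.e. floor(log2(divisor)), while the movhs/addhs ladder is a binary-search fallback for cores without the instruction. A C restatement of the clz path, with GCC's __builtin_clz() standing in for the ARM instruction:

#include <stdio.h>

int main(void)
{
	unsigned int divisor = 64;	/* any power of two */
	unsigned int order = 31 - __builtin_clz(divisor);

	printf("x / %u == x >> %u\n", divisor, order);	/* shift by 6 */
	return 0;
}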
/kernel/linux/linux-5.10/drivers/staging/android/ion/
ion_page_pool.c:19 return alloc_pages(pool->gfp_mask, pool->order); in ion_page_pool_alloc_pages()
25 __free_pages(page, pool->order); in ion_page_pool_free_pages()
40 1 << pool->order); in ion_page_pool_add()
60 -(1 << pool->order)); in ion_page_pool_remove()
85 BUG_ON(pool->order != compound_order(page)); in ion_page_pool_free()
97 return count << pool->order; in ion_page_pool_total()
128 freed += (1 << pool->order); in ion_page_pool_shrink()
134 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) in ion_page_pool_create() argument
145 pool->order = order; in ion_page_pool_create()
147 plist_node_init(&pool->list, order); in ion_page_pool_create()
/kernel/linux/linux-5.10/kernel/locking/
test-ww_mutex.c:351 int *order; in get_random_order() local
354 order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); in get_random_order()
355 if (!order) in get_random_order()
356 return order; in get_random_order()
359 order[n] = n; in get_random_order()
364 tmp = order[n]; in get_random_order()
365 order[n] = order[r]; in get_random_order()
366 order[r] = tmp; in get_random_order()
370 return order; in get_random_order()
384 int *order; in stress_inorder_work() local
[all …]
