Searched refs:order (Results 1 – 25 of 42) sorted by relevance

/include/trace/events/
compaction.h 173 int order,
177 TP_ARGS(order, gfp_mask, prio),
180 __field(int, order)
186 __entry->order = order;
192 __entry->order,
200 int order,
203 TP_ARGS(zone, order, ret),
208 __field(int, order)
215 __entry->order = order;
222 __entry->order,
[all …]
kmem.h 154 TP_PROTO(struct page *page, unsigned int order),
156 TP_ARGS(page, order),
160 __field( unsigned int, order )
165 __entry->order = order;
171 __entry->order)
195 TP_PROTO(struct page *page, unsigned int order,
198 TP_ARGS(page, order, gfp_flags, migratetype),
202 __field( unsigned int, order )
209 __entry->order = order;
217 __entry->order,
[all …]
vmscan.h 54 TP_PROTO(int nid, int zid, int order),
56 TP_ARGS(nid, zid, order),
61 __field( int, order )
67 __entry->order = order;
72 __entry->order)
77 TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
79 TP_ARGS(nid, zid, order, gfp_flags),
84 __field( int, order )
91 __entry->order = order;
97 __entry->order,
[all …]
oom.h 35 int order,
42 TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
47 __field( int, order)
58 __entry->order = order;
68 __entry->order,
157 TP_PROTO(int order,
164 TP_ARGS(order, priority, result, retries, max_retries, ret),
167 __field( int, order)
176 __entry->order = order;
185 __entry->order,
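
The compaction, kmem, vmscan and oom hits above all use the same TRACE_EVENT() plumbing: the allocation order enters through TP_PROTO()/TP_ARGS(), is copied into the ring-buffer record by __field() and TP_fast_assign(), and is emitted by TP_printk(). A minimal sketch of that pattern follows; the event name and header guard are invented for illustration, not one of the kernel's real events.

/*
 * Sketch only: a made-up tracepoint header mirroring the pattern in the
 * hits above.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(example_alloc,

        TP_PROTO(unsigned int order),

        TP_ARGS(order),

        TP_STRUCT__entry(
                __field(unsigned int, order)
        ),

        TP_fast_assign(
                __entry->order = order;
        ),

        TP_printk("order=%u", __entry->order)
);

#endif /* _TRACE_EXAMPLE_H */

/* This include must stay outside the multi-read protection. */
#include <trace/define_trace.h>
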
/include/linux/
page_owner.h 11 extern void __reset_page_owner(struct page *page, unsigned int order);
13 unsigned int order, gfp_t gfp_mask);
14 extern void __split_page_owner(struct page *page, unsigned int order);
21 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
24 __reset_page_owner(page, order); in reset_page_owner()
28 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
31 __set_page_owner(page, order, gfp_mask); in set_page_owner()
34 static inline void split_page_owner(struct page *page, unsigned int order) in split_page_owner() argument
37 __split_page_owner(page, order); in split_page_owner()
55 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
[all …]
compaction.h 68 static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument
83 return 2UL << order; in compact_gap()
93 extern int fragmentation_index(struct zone *zone, unsigned int order);
95 unsigned int order, unsigned int alloc_flags,
99 extern enum compact_result compaction_suitable(struct zone *zone, int order,
102 extern void defer_compaction(struct zone *zone, int order);
103 extern bool compaction_deferred(struct zone *zone, int order);
104 extern void compaction_defer_reset(struct zone *zone, int order,
106 extern bool compaction_restarting(struct zone *zone, int order);
180 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
[all …]
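
compact_gap() above returns 2UL << order, i.e. twice the requested block in pages, which roughly leaves free room for migration targets as well as for the allocation itself. A standalone check of the arithmetic (userspace C, illustration only):

#include <stdio.h>

/* Mirrors the compact_gap() expression shown above: 2UL << order pages. */
static unsigned long compact_gap_sketch(unsigned int order)
{
        return 2UL << order;
}

int main(void)
{
        /*
         * Prints "2 16": an order-0 request wants 2 free pages,
         * an order-3 request (8 pages) wants 16.
         */
        printf("%lu %lu\n", compact_gap_sketch(0), compact_gap_sketch(3));
        return 0;
}
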
gfp.h 483 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument
486 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument
490 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
494 __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) in __alloc_pages() argument
496 return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL); in __alloc_pages()
504 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node() argument
509 return __alloc_pages(gfp_mask, order, nid); in __alloc_pages_node()
518 unsigned int order) in alloc_pages_node() argument
523 return __alloc_pages_node(nid, gfp_mask, order); in alloc_pages_node()
527 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
[all …]
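
The gfp.h hits are the buddy-allocator entry points that take an order, i.e. a request for 2^order physically contiguous pages. A minimal caller sketch, assuming a kernel-module context; the function name is invented for illustration:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: allocate and free 2^order physically contiguous pages. */
static int example_order_alloc(void)
{
        unsigned int order = 2;                 /* 2^2 = 4 pages */
        struct page *page = alloc_pages(GFP_KERNEL, order);

        if (!page)
                return -ENOMEM;

        /* ... up to 4 * PAGE_SIZE bytes available via page_address(page) ... */

        __free_pages(page, order);              /* order must match the allocation */
        return 0;
}
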
bitops.h 52 int order; in get_bitmask_order() local
54 order = fls(count); in get_bitmask_order()
55 return order; /* We could be slightly more clever with -1 here... */ in get_bitmask_order()
176 int order; in get_count_order() local
178 order = fls(count) - 1; in get_count_order()
180 order++; in get_count_order()
181 return order; in get_count_order()
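
get_count_order() above rounds a count up to the next power of two and returns that exponent: fls(count) - 1 is floor(log2(count)), and the order is bumped when count is not already a power of two. A standalone sketch of the same logic (userspace reimplementation for illustration, not the kernel helpers):

#include <stdio.h>

/* 1-based index of the highest set bit, like the kernel's fls(). */
static int fls_sketch(unsigned int x)
{
        int bit = 0;

        while (x) {
                bit++;
                x >>= 1;
        }
        return bit;
}

/* Smallest order such that (1U << order) >= count. */
static int get_count_order_sketch(unsigned int count)
{
        int order = fls_sketch(count) - 1;

        if (count & (count - 1))        /* not a power of two: round up */
                order++;
        return order;
}

int main(void)
{
        /* Prints "3 3": both 5 and 8 fit in 2^3 = 8 slots. */
        printf("%d %d\n", get_count_order_sketch(5), get_count_order_sketch(8));
        return 0;
}
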
mempool.h 99 static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order) in mempool_init_page_pool() argument
102 mempool_free_pages, (void *)(long)order); in mempool_init_page_pool()
105 static inline mempool_t *mempool_create_page_pool(int min_nr, int order) in mempool_create_page_pool() argument
108 (void *)(long)order); in mempool_create_page_pool()
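
mempool_create_page_pool() above builds a mempool whose elements are order-sized page blocks, with the order stashed in the pool data pointer as shown. A short usage sketch, assuming a kernel-module context; the function name is invented:

#include <linux/mempool.h>
#include <linux/mm.h>

/* Sketch: a pool guaranteeing at least 4 order-1 (two-page) elements. */
static int example_page_pool(void)
{
        mempool_t *pool = mempool_create_page_pool(4, 1);  /* min_nr = 4, order = 1 */
        struct page *page;

        if (!pool)
                return -ENOMEM;

        page = mempool_alloc(pool, GFP_KERNEL);            /* returns a struct page * */
        if (page)
                mempool_free(page, pool);

        mempool_destroy(pool);
        return 0;
}
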
kasan.h 43 void kasan_alloc_pages(struct page *page, unsigned int order);
44 void kasan_free_pages(struct page *page, unsigned int order);
99 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} in kasan_alloc_pages() argument
100 static inline void kasan_free_pages(struct page *page, unsigned int order) {} in kasan_free_pages() argument
memcontrol.h 745 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1132 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, in mem_cgroup_soft_limit_reclaim() argument
1370 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
1371 void __memcg_kmem_uncharge(struct page *page, int order);
1372 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
1397 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) in memcg_kmem_charge() argument
1400 return __memcg_kmem_charge(page, gfp, order); in memcg_kmem_charge()
1404 static inline void memcg_kmem_uncharge(struct page *page, int order) in memcg_kmem_uncharge() argument
1407 __memcg_kmem_uncharge(page, order); in memcg_kmem_uncharge()
1411 int order, struct mem_cgroup *memcg) in memcg_kmem_charge_memcg() argument
[all …]
xarray.h 1325 #define XA_STATE_ORDER(name, array, index, order) \ argument
1327 (index >> order) << order, \
1328 order - (order % XA_CHUNK_SHIFT), \
1329 (1U << (order % XA_CHUNK_SHIFT)) - 1)
1518 unsigned int order) in xas_set_order() argument
1521 xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0; in xas_set_order()
1522 xas->xa_shift = order - (order % XA_CHUNK_SHIFT); in xas_set_order()
1523 xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; in xas_set_order()
1526 BUG_ON(order > 0); in xas_set_order()
migrate.h 38 unsigned int order = 0; in new_page_nodemask() local
47 order = HPAGE_PMD_ORDER; in new_page_nodemask()
53 new_page = __alloc_pages_nodemask(gfp_mask, order, in new_page_nodemask()
dma-contiguous.h 112 unsigned int order, bool no_warn);
147 unsigned int order, bool no_warn) in dma_alloc_from_contiguous() argument
slab.h 470 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __…
473 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignm…
476 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) in kmalloc_order_trace() argument
478 return kmalloc_order(size, flags, order); in kmalloc_order_trace()
484 unsigned int order = get_order(size); in kmalloc_large() local
485 return kmalloc_order_trace(size, flags, order); in kmalloc_large()
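
kmalloc_large() above turns a byte count into a page order with get_order(size) before handing it to kmalloc_order_trace(). A standalone sketch of that rounding, assuming 4 KiB pages; get_order_sketch() is a reimplementation for illustration, not the kernel macro:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Smallest order such that (1UL << (order + PAGE_SHIFT)) bytes covers size. */
static int get_order_sketch(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        /* Prints "5": a 70000-byte request rounds up to 32 pages (131072 bytes). */
        printf("%d\n", get_order_sketch(70000));
        return 0;
}
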
hugetlb.h 328 unsigned int order; member
370 void __init hugetlb_add_hstate(unsigned order);
407 return (unsigned long)PAGE_SIZE << h->order; in huge_page_size()
421 return h->order; in huge_page_order()
426 return h->order + PAGE_SHIFT; in huge_page_shift()
436 return 1 << h->order; in pages_per_huge_page()
462 return hstates[index].order + PAGE_SHIFT; in hstate_index_to_shift()
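
The hugetlb.h hits derive everything from hstate->order: huge_page_size() is PAGE_SIZE << order, pages_per_huge_page() is 1 << order, and huge_page_shift() is order + PAGE_SHIFT. A worked example assuming 4 KiB base pages (PAGE_SHIFT = 12), where a 2 MiB huge page has order 9:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned int order = 9;         /* 2 MiB huge page on a 4 KiB-page system */

        printf("size  = %lu bytes\n", PAGE_SIZE << order);  /* 2097152 = 2 MiB */
        printf("pages = %u\n", 1U << order);                /* 512 base pages  */
        printf("shift = %u\n", order + PAGE_SHIFT);         /* 21              */
        return 0;
}
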
oom.h 46 const int order; member
scatterlist.h 285 unsigned int order, bool chainable,
289 void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
290 void sgl_free_order(struct scatterlist *sgl, int order);
mmzone.h 85 #define for_each_migratetype_order(order, type) \ argument
86 for (order = 0; order < MAX_ORDER; order++) \
816 void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
818 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
821 bool zone_watermark_ok(struct zone *z, unsigned int order,
824 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
bitmap.h 195 extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
196 extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
197 extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
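
The bitmap.h region helpers also take an order: bitmap_find_free_region() finds and claims an aligned run of 2^order clear bits, and bitmap_release_region() gives it back. A minimal usage sketch, assuming a kernel-module context; the function name is invented:

#include <linux/bitmap.h>

/* Sketch: carve an aligned 2^2 = 4-bit region out of a 32-bit bitmap. */
static int example_bitmap_region(void)
{
        DECLARE_BITMAP(map, 32);
        int pos;

        bitmap_zero(map, 32);

        pos = bitmap_find_free_region(map, 32, 2);  /* finds and sets 4 bits */
        if (pos < 0)
                return pos;

        bitmap_release_region(map, pos, 2);         /* order must match */
        return 0;
}
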
/include/drm/
drm_hashtab.h 49 u8 order; member
52 int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
/include/xen/
xen-ops.h 46 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
50 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
53 unsigned int order, in xen_create_contiguous_region() argument
61 unsigned int order) { } in xen_destroy_contiguous_region() argument
/include/media/drv-intf/
soc_mediabus.h 78 enum soc_mbus_order order; member
/include/sound/
soc-component.h 26 #define for_each_comp_order(order) \ argument
27 for (order = SND_SOC_COMP_ORDER_FIRST; \
28 order <= SND_SOC_COMP_ORDER_LAST; \
29 order++)
/include/xen/arm/
page.h 111 unsigned long xen_get_swiotlb_free_pages(unsigned int order);
