Home
last modified time | relevance | path

Searched refs: order (Results 1 – 25 of 44) sorted by relevance

1 2

/include/trace/events/
Dcompaction.h173 int order,
177 TP_ARGS(order, gfp_mask, prio),
180 __field(int, order)
186 __entry->order = order;
192 __entry->order,
200 int order,
203 TP_ARGS(zone, order, ret),
208 __field(int, order)
215 __entry->order = order;
222 __entry->order,
[all …]
Dvmscan.h54 TP_PROTO(int nid, int zid, int order),
56 TP_ARGS(nid, zid, order),
61 __field( int, order )
67 __entry->order = order;
72 __entry->order)
77 TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
79 TP_ARGS(nid, zid, order, gfp_flags),
84 __field( int, order )
91 __entry->order = order;
97 __entry->order,
[all …]
Dkmem.h162 TP_PROTO(struct page *page, unsigned int order),
164 TP_ARGS(page, order),
168 __field( unsigned int, order )
173 __entry->order = order;
179 __entry->order)
203 TP_PROTO(struct page *page, unsigned int order,
206 TP_ARGS(page, order, gfp_flags, migratetype),
210 __field( unsigned int, order )
217 __entry->order = order;
225 __entry->order,
[all …]
Doom.h37 int order,
44 TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
49 __field( int, order)
60 __entry->order = order;
70 __entry->order,
185 TP_PROTO(int order,
192 TP_ARGS(order, priority, result, retries, max_retries, ret),
195 __field( int, order)
204 __entry->order = order;
213 __entry->order,
/include/linux/
Dpage_owner.h13 extern void __reset_page_owner(struct page *page, unsigned int order);
15 unsigned int order, gfp_t gfp_mask);
23 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
26 __reset_page_owner(page, order); in reset_page_owner()
30 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
33 __set_page_owner(page, order, gfp_mask); in set_page_owner()
57 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
61 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
65 unsigned int order) in split_page_owner() argument
Dcompaction.h65 static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument
80 return 2UL << order; in compact_gap()
92 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
93 extern int fragmentation_index(struct zone *zone, unsigned int order);
95 unsigned int order, unsigned int alloc_flags,
99 extern enum compact_result compaction_suitable(struct zone *zone, int order,
102 extern void compaction_defer_reset(struct zone *zone, int order,
177 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
182 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
191 static inline enum compact_result compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument
[all …]
Dpage_pinner.h12 extern void __free_page_pinner(struct page *page, unsigned int order);
16 static inline void free_page_pinner(struct page *page, unsigned int order) in free_page_pinner() argument
19 __free_page_pinner(page, order); in free_page_pinner()
44 static inline void free_page_pinner(struct page *page, unsigned int order) in free_page_pinner() argument
Dgfp.h539 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument
542 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument
551 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
586 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node() argument
591 return __alloc_pages(gfp_mask, order, nid, NULL); in __alloc_pages_node()
600 unsigned int order) in alloc_pages_node() argument
605 return __alloc_pages_node(nid, gfp_mask, order); in alloc_pages_node()
609 struct page *alloc_pages(gfp_t gfp, unsigned int order);
610 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
613 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ argument
[all …]
Dmempool.h99 static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order) in mempool_init_page_pool() argument
102 mempool_free_pages, (void *)(long)order); in mempool_init_page_pool()
105 static inline mempool_t *mempool_create_page_pool(int min_nr, int order) in mempool_create_page_pool() argument
108 (void *)(long)order); in mempool_create_page_pool()
Dkasan.h114 void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
116 unsigned int order, bool init) in kasan_poison_pages() argument
119 __kasan_poison_pages(page, order, init); in kasan_poison_pages()
122 void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
124 unsigned int order, bool init) in kasan_unpoison_pages() argument
127 __kasan_unpoison_pages(page, order, init); in kasan_unpoison_pages()
274 static inline void kasan_poison_pages(struct page *page, unsigned int order, in kasan_poison_pages() argument
276 static inline void kasan_unpoison_pages(struct page *page, unsigned int order, in kasan_unpoison_pages() argument
Dpage_reporting.h23 unsigned int order; member
Dxarray.h1364 #define XA_STATE_ORDER(name, array, index, order) \ argument
1366 (index >> order) << order, \
1367 order - (order % XA_CHUNK_SHIFT), \
1368 (1U << (order % XA_CHUNK_SHIFT)) - 1)
1514 void xas_split(struct xa_state *, void *entry, unsigned int order);
1515 void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
1523 unsigned int order) in xas_split() argument
1529 unsigned int order, gfp_t gfp) in xas_split_alloc() argument
1590 unsigned int order) in xas_set_order() argument
1593 xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0; in xas_set_order()
[all …]
Ddma-map-ops.h122 unsigned int order, bool no_warn);
144 size_t count, unsigned int order, bool no_warn) in dma_alloc_from_contiguous() argument
178 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
189 #define dma_release_from_dev_coherent(dev, order, vaddr) (0) argument
190 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) argument
197 int dma_release_from_global_coherent(int order, void *vaddr);
207 static inline int dma_release_from_global_coherent(int order, void *vaddr) in dma_release_from_global_coherent() argument
Dslab.h517 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __…
520 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignm…
523 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) in kmalloc_order_trace() argument
525 return kmalloc_order(size, flags, order); in kmalloc_order_trace()
531 unsigned int order = get_order(size); in kmalloc_large() local
532 return kmalloc_order_trace(size, flags, order); in kmalloc_large()
Dbitops.h71 int order; in get_bitmask_order() local
73 order = fls(count); in get_bitmask_order()
74 return order; /* We could be slightly more clever with -1 here... */ in get_bitmask_order()
Dhugetlb.h605 unsigned int order; member
650 void __init hugetlb_add_hstate(unsigned order);
700 return (unsigned long)PAGE_SIZE << h->order; in huge_page_size()
714 return h->order; in huge_page_order()
719 return h->order + PAGE_SHIFT; in huge_page_shift()
729 return 1 << h->order; in pages_per_huge_page()
769 return hstates[index].order + PAGE_SHIFT; in hstate_index_to_shift()
1095 extern void __init hugetlb_cma_reserve(int order);
1098 static inline __init void hugetlb_cma_reserve(int order) in hugetlb_cma_reserve() argument
Dmemcontrol.h1159 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1547 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, in mem_cgroup_soft_limit_reclaim() argument
1718 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1719 void __memcg_kmem_uncharge_page(struct page *page, int order);
1746 int order) in memcg_kmem_charge_page() argument
1749 return __memcg_kmem_charge_page(page, gfp, order); in memcg_kmem_charge_page()
1753 static inline void memcg_kmem_uncharge_page(struct page *page, int order) in memcg_kmem_uncharge_page() argument
1756 __memcg_kmem_uncharge_page(page, order); in memcg_kmem_uncharge_page()
1777 int order) in memcg_kmem_charge_page() argument
1782 static inline void memcg_kmem_uncharge_page(struct page *page, int order) in memcg_kmem_uncharge_page() argument
[all …]
Doom.h46 const int order; member
Dmmzone.h91 #define for_each_migratetype_order(order, type) \ argument
92 for (order = 0; order < MAX_ORDER; order++) \
1160 void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
1162 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
1165 bool zone_watermark_ok(struct zone *z, unsigned int order,
1168 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
Dlsm_hooks.h1631 enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */ member
/include/trace/hooks/
Dmm.h62 TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long delta),
63 TP_ARGS(gfp_mask, order, delta));
74 TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long alloc_flags,
77 TP_ARGS(gfp_mask, order, alloc_flags, migratetype, did_some_progress, bypass));
93 TP_PROTO(struct page *page, int order, int migratetype, bool *bypass),
94 TP_ARGS(page, order, migratetype, bypass));
99 TP_PROTO(gfp_t gfp_mask, int order, int *alloc_flags,
101 TP_ARGS(gfp_mask, order, alloc_flags,
107 TP_PROTO(unsigned int order, struct per_cpu_pages *pcp, int migratetype,
109 TP_ARGS(order, pcp, migratetype, list));
[all …]
Diommu.h33 TP_PROTO(unsigned int order, gfp_t *alloc_flags),
34 TP_ARGS(order, alloc_flags));
/include/drm/
Ddrm_hashtab.h49 u8 order; member
52 int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
/include/drm/ttm/
Dttm_pool.h51 unsigned int order; member
/include/xen/
Dxen-ops.h46 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
49 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);

1 2