
Searched refs: order (Results 1 – 25 of 34) sorted by relevance

/mm/
compaction.c
53 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
54 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
94 unsigned int i, order, nr_pages; in split_map_pages() local
101 order = page_private(page); in split_map_pages()
102 nr_pages = 1 << order; in split_map_pages()
104 post_alloc_hook(page, order, __GFP_MOVABLE); in split_map_pages()
105 if (order) in split_map_pages()
106 split_page(page, order); in split_map_pages()
165 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
170 if (order < zone->compact_order_failed) in defer_compaction()
[all …]
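
The two macros at the top of the compaction.c result show how compaction walks memory in order-aligned blocks: block_start_pfn() rounds a pfn down to the start of its 2^order block, and block_end_pfn() rounds up to the first pfn of the next block. A minimal userspace sketch of the same arithmetic (round_down()/ALIGN() are reimplemented here for illustration, assuming power-of-two sizes):

#include <stdio.h>

/* Power-of-two rounding helpers, mirroring the kernel's round_down()/ALIGN(). */
#define round_down(x, y)  ((x) & ~((y) - 1))
#define ALIGN(x, y)       (((x) + (y) - 1) & ~((y) - 1))

#define block_start_pfn(pfn, order)  round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)    ALIGN((pfn) + 1, 1UL << (order))

int main(void)
{
    unsigned long pfn = 1234567;
    unsigned int order = 9;          /* typical pageblock order on x86-64 */

    printf("block [%lu, %lu) contains pfn %lu\n",
           block_start_pfn(pfn, order), block_end_pfn(pfn, order), pfn);
    return 0;
}
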
page_alloc.c
265 static void __free_pages_ok(struct page *page, unsigned int order,
720 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
723 int nr_pages = 1 << order; in prep_compound_page()
734 set_compound_order(page, order); in prep_compound_page()
772 unsigned int order, int migratetype) in set_page_guard() argument
777 if (order >= debug_guardpage_minorder()) in set_page_guard()
782 set_page_private(page, order); in set_page_guard()
784 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
790 unsigned int order, int migratetype) in clear_page_guard() argument
799 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
[all …]
page_owner.c
24 unsigned short order; member
120 if (!IS_ALIGNED(pfn, 1 << page_owner->order)) in get_page_owner_handle()
167 void __reset_page_owner(struct page *page, unsigned int order) in __reset_page_owner() argument
180 for (i = 0; i < (1 << order); i++) { in __reset_page_owner()
192 unsigned int order, gfp_t gfp_mask) in __set_page_owner_handle() argument
197 for (i = 0; i < (1 << order); i++) { in __set_page_owner_handle()
200 page_owner->order = order; in __set_page_owner_handle()
212 noinline void __set_page_owner(struct page *page, unsigned int order, in __set_page_owner() argument
223 __set_page_owner_handle(page, page_ext, handle, order, gfp_mask); in __set_page_owner()
252 page_owner->order = 0; in __split_page_owner()
[all …]
vmstat.c
1046 unsigned int order; in fill_contig_page_info() local
1052 for (order = 0; order < MAX_ORDER; order++) { in fill_contig_page_info()
1056 blocks = zone->free_area[order].nr_free; in fill_contig_page_info()
1060 info->free_pages += blocks << order; in fill_contig_page_info()
1063 if (order >= suitable_order) in fill_contig_page_info()
1065 (order - suitable_order); in fill_contig_page_info()
1076 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) in __fragmentation_index() argument
1078 unsigned long requested = 1UL << order; in __fragmentation_index()
1080 if (WARN_ON_ONCE(order >= MAX_ORDER)) in __fragmentation_index()
1104 unsigned int extfrag_for_order(struct zone *zone, unsigned int order) in extfrag_for_order() argument
[all …]
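
fill_contig_page_info() in the vmstat.c result walks the free lists once and, for a requested order, tallies total free pages and how many free blocks are large enough to satisfy an allocation of that order; each larger block counts for 2^(order - suitable_order) suitable blocks. A hedged userspace sketch of that counting loop, with a plain array standing in for zone->free_area[].nr_free and MAX_ORDER assumed to be 11:

#include <stdio.h>

#define MAX_ORDER 11   /* assumption: common kernel default */

struct contig_page_info {
    unsigned long free_pages;           /* total free pages, all orders     */
    unsigned long free_blocks_total;    /* total free blocks, all orders    */
    unsigned long free_blocks_suitable; /* blocks usable at suitable_order  */
};

/* nr_free[order] stands in for zone->free_area[order].nr_free. */
static void fill_contig_page_info(const unsigned long nr_free[MAX_ORDER],
                                  unsigned int suitable_order,
                                  struct contig_page_info *info)
{
    unsigned int order;

    info->free_pages = 0;
    info->free_blocks_total = 0;
    info->free_blocks_suitable = 0;

    for (order = 0; order < MAX_ORDER; order++) {
        unsigned long blocks = nr_free[order];

        info->free_blocks_total += blocks;
        info->free_pages += blocks << order;

        /* A larger block can be split into 2^(order - suitable_order)
         * blocks of the requested size. */
        if (order >= suitable_order)
            info->free_blocks_suitable += blocks << (order - suitable_order);
    }
}

int main(void)
{
    unsigned long nr_free[MAX_ORDER] = { 100, 40, 10, 2, 1 };
    struct contig_page_info info;

    fill_contig_page_info(nr_free, 2, &info);
    printf("free pages %lu, suitable order-2 blocks %lu\n",
           info.free_pages, info.free_blocks_suitable);
    return 0;
}
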
page_reporting.c
80 unsigned int order = get_order(sg->length); in page_reporting_drain() local
82 __putback_isolated_page(page, order, mt); in page_reporting_drain()
95 if (PageBuddy(page) && buddy_order(page) == order) in page_reporting_drain()
110 unsigned int order, unsigned int mt, in page_reporting_cycle() argument
113 struct free_area *area = &zone->free_area[order]; in page_reporting_cycle()
115 unsigned int page_len = PAGE_SIZE << order; in page_reporting_cycle()
164 if (!__isolate_free_page(page, order)) { in page_reporting_cycle()
226 unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY; in page_reporting_process_zone() local
242 for (order = PAGE_REPORTING_MIN_ORDER; order < MAX_ORDER; order++) { in page_reporting_process_zone()
248 err = page_reporting_cycle(prdev, zone, order, mt, in page_reporting_process_zone()
slob.c
191 static void *slob_new_pages(gfp_t gfp, int order, int node) in slob_new_pages() argument
197 page = __alloc_pages_node(node, gfp, order); in slob_new_pages()
200 page = alloc_pages(gfp, order); in slob_new_pages()
206 PAGE_SIZE << order); in slob_new_pages()
210 static void slob_free_pages(void *b, int order) in slob_free_pages() argument
215 current->reclaim_state->reclaimed_slab += 1 << order; in slob_free_pages()
218 -(PAGE_SIZE << order)); in slob_free_pages()
219 __free_pages(sp, order); in slob_free_pages()
505 unsigned int order = get_order(size); in __do_kmalloc_node() local
507 if (likely(order)) in __do_kmalloc_node()
[all …]
internal.h
194 __find_buddy_pfn(unsigned long page_pfn, unsigned int order) in __find_buddy_pfn() argument
196 return page_pfn ^ (1 << order); in __find_buddy_pfn()
211 extern int __isolate_free_page(struct page *page, unsigned int order);
212 extern void __putback_isolated_page(struct page *page, unsigned int order,
215 unsigned int order);
216 extern void __free_pages_core(struct page *page, unsigned int order);
217 extern void prep_compound_page(struct page *page, unsigned int order);
218 extern void post_alloc_hook(struct page *page, unsigned int order,
251 int order; /* order a direct compactor needs */ member
282 int find_suitable_fallback(struct free_area *area, unsigned int order,
[all …]
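
__find_buddy_pfn() in the internal.h result captures the buddy allocator's pairing rule: two free blocks of the same order are buddies when their pfns differ only in bit 'order', so the buddy is found by flipping that single bit. A small standalone sketch of the symmetry:

#include <stdio.h>

/* Flip bit 'order' of an order-aligned pfn to get its buddy's pfn. */
static unsigned long find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
    return page_pfn ^ (1UL << order);
}

int main(void)
{
    unsigned long pfn = 0x1000;      /* an order-3 aligned pfn */
    unsigned int order = 3;
    unsigned long buddy = find_buddy_pfn(pfn, order);

    /* The relation is symmetric: the buddy's buddy is the original pfn. */
    printf("pfn 0x%lx <-> buddy 0x%lx (back: 0x%lx)\n",
           pfn, buddy, find_buddy_pfn(buddy, order));
    return 0;
}
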
shuffle.c
38 unsigned long pfn, int order) in shuffle_valid_page() argument
63 if (buddy_order(page) != order) in shuffle_valid_page()
86 const int order = SHUFFLE_ORDER; in __shuffle_zone() local
87 const int order_pages = 1 << order; in __shuffle_zone()
102 page_i = shuffle_valid_page(z, i, order); in __shuffle_zone()
116 page_j = shuffle_valid_page(z, j, order); in __shuffle_zone()
vmscan.c
134 s8 order; member
1798 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_pages()
2646 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in in_reclaim_compaction()
2647 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
2692 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { in should_continue_reclaim()
2706 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
2978 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); in compaction_ready()
2995 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
3049 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
3072 sc->order, sc->gfp_mask, in shrink_zones()
[all …]
page_isolation.c
72 unsigned int order; in unset_migratetype_isolate() local
90 order = buddy_order(page); in unset_migratetype_isolate()
91 if (order >= pageblock_order) { in unset_migratetype_isolate()
93 buddy_pfn = __find_buddy_pfn(pfn, order); in unset_migratetype_isolate()
98 __isolate_free_page(page, order); in unset_migratetype_isolate()
120 __putback_isolated_page(page, order, migratetype); in unset_migratetype_isolate()
mempool.c
65 int order = (int)(long)pool->pool_data; in check_element() local
68 __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); in check_element()
88 int order = (int)(long)pool->pool_data; in poison_element() local
91 __poison_element(addr, 1UL << (PAGE_SHIFT + order)); in poison_element()
547 int order = (int)(long)pool_data; in mempool_alloc_pages() local
548 return alloc_pages(gfp_mask, order); in mempool_alloc_pages()
554 int order = (int)(long)pool_data; in mempool_free_pages() local
555 __free_pages(element, order); in mempool_free_pages()
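
The mempool.c results show the page-backed pool carrying its allocation order in the opaque pool_data cookie and casting it back to an int in each callback. A minimal userspace sketch of the same cookie pattern, with malloc()/free() standing in for alloc_pages()/__free_pages() (the names below are illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

/* The pool stores the order as a small integer smuggled through a void *. */
static void *pool_alloc(void *pool_data)
{
    int order = (int)(long)pool_data;
    return malloc(1UL << (PAGE_SHIFT + order));   /* stand-in for alloc_pages() */
}

static void pool_free(void *element, void *pool_data)
{
    (void)pool_data;                              /* order not needed to free here */
    free(element);                                /* stand-in for __free_pages()   */
}

int main(void)
{
    int order = 2;                                /* four contiguous "pages" */
    void *pool_data = (void *)(long)order;

    void *elem = pool_alloc(pool_data);
    printf("allocated %lu bytes for order %d\n",
           1UL << (PAGE_SHIFT + order), order);
    pool_free(elem, pool_data);
    return 0;
}
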
page_reporting.h
34 static inline void page_reporting_notify_free(unsigned int order) in page_reporting_notify_free() argument
41 if (order < PAGE_REPORTING_MIN_ORDER) in page_reporting_notify_free()
50 static inline void page_reporting_notify_free(unsigned int order) in page_reporting_notify_free() argument
shuffle.h
28 static inline bool is_shuffle_order(int order) in is_shuffle_order() argument
32 return order >= SHUFFLE_ORDER; in is_shuffle_order()
48 static inline bool is_shuffle_order(int order) in is_shuffle_order() argument
slub.c
313 static inline unsigned int order_objects(unsigned int order, unsigned int size) in order_objects() argument
315 return ((unsigned int)PAGE_SIZE << order) / size; in order_objects()
318 static inline struct kmem_cache_order_objects oo_make(unsigned int order, in oo_make() argument
322 (order << OO_SHIFT) + order_objects(order, size) in oo_make()
1671 unsigned int order = oo_order(oo); in alloc_slab_page() local
1674 page = alloc_pages(flags, order); in alloc_slab_page()
1676 page = __alloc_pages_node(node, flags, order); in alloc_slab_page()
1679 account_slab_page(page, order, s); in alloc_slab_page()
1884 int order = compound_order(page); in __free_slab() local
1885 int pages = 1 << order; in __free_slab()
[all …]
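
order_objects() and oo_make() in the slub.c result pack the slab order and the number of objects that fit into a slab of that order into a single word, with the order in the bits above OO_SHIFT and the object count below. A sketch of the packing and unpacking, with the PAGE_SIZE and OO_SHIFT values assumed for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define OO_SHIFT  16
#define OO_MASK   ((1u << OO_SHIFT) - 1)

struct kmem_cache_order_objects { unsigned int x; };

/* How many objects of 'size' bytes fit into 2^order pages. */
static unsigned int order_objects(unsigned int order, unsigned int size)
{
    return (PAGE_SIZE << order) / size;
}

/* Pack order (high bits) and objects-per-slab (low bits) into one word. */
static struct kmem_cache_order_objects oo_make(unsigned int order, unsigned int size)
{
    struct kmem_cache_order_objects oo = {
        (order << OO_SHIFT) + order_objects(order, size)
    };
    return oo;
}

static unsigned int oo_order(struct kmem_cache_order_objects oo)   { return oo.x >> OO_SHIFT; }
static unsigned int oo_objects(struct kmem_cache_order_objects oo) { return oo.x & OO_MASK; }

int main(void)
{
    struct kmem_cache_order_objects oo = oo_make(1, 192);

    printf("order %u, %u objects per slab\n", oo_order(oo), oo_objects(oo));
    return 0;
}
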
page_pinner.c
160 void __reset_page_pinner(struct page *page, unsigned int order, bool free) in __reset_page_pinner() argument
170 for (i = 0; i < (1 << order); i++) { in __reset_page_pinner()
193 unsigned int order) in __set_page_pinner_handle() argument
199 for (i = 0; i < (1 << order); i++) { in __set_page_pinner_handle()
209 noinline void __set_page_pinner(struct page *page, unsigned int order) in __set_page_pinner() argument
219 __set_page_pinner_handle(page, page_ext, handle, order); in __set_page_pinner()
sparse-vmemmap.c
53 int order = get_order(size); in vmemmap_alloc_block() local
57 page = alloc_pages_node(node, gfp_mask, order); in vmemmap_alloc_block()
63 "vmemmap alloc failure: order:%u", order); in vmemmap_alloc_block()
slab_common.c
848 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) in kmalloc_order() argument
857 page = alloc_pages(flags, order); in kmalloc_order()
861 PAGE_SIZE << order); in kmalloc_order()
871 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) in kmalloc_order_trace() argument
873 void *ret = kmalloc_order(size, flags, order); in kmalloc_order_trace()
874 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); in kmalloc_order_trace()
hugetlb.c
1250 unsigned int order) in destroy_compound_gigantic_page() argument
1253 int nr_pages = 1 << order; in destroy_compound_gigantic_page()
1269 static void free_gigantic_page(struct page *page, unsigned int order) in free_gigantic_page() argument
1276 if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) in free_gigantic_page()
1280 free_contig_range(page_to_pfn(page), 1 << order); in free_gigantic_page()
1336 static inline void free_gigantic_page(struct page *page, unsigned int order) { } in free_gigantic_page() argument
1338 unsigned int order) { } in destroy_compound_gigantic_page() argument
1559 static void prep_compound_gigantic_page(struct page *page, unsigned int order) in prep_compound_gigantic_page() argument
1562 int nr_pages = 1 << order; in prep_compound_gigantic_page()
1566 set_compound_order(page, order); in prep_compound_gigantic_page()
[all …]
slab.h
484 static __always_inline void account_slab_page(struct page *page, int order, in account_slab_page() argument
488 PAGE_SIZE << order); in account_slab_page()
491 static __always_inline void unaccount_slab_page(struct page *page, int order, in unaccount_slab_page() argument
498 -(PAGE_SIZE << order)); in unaccount_slab_page()
vmalloc.c
1546 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) in new_vmap_block() argument
1574 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); in new_vmap_block()
1575 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block()
1657 unsigned int order; in vb_alloc() local
1669 order = get_order(size); in vb_alloc()
1677 if (vb->free < (1UL << order)) { in vb_alloc()
1684 vb->free -= 1UL << order; in vb_alloc()
1700 vaddr = new_vmap_block(order, gfp_mask); in vb_alloc()
1708 unsigned int order; in vb_free() local
1716 order = get_order(size); in vb_free()
[all …]
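
Both vb_alloc() and vb_free() in the vmalloc.c result convert a request size to a page order with get_order() and account the block's free space in units of 2^order pages. A simplified sketch of what get_order() computes (a plain loop, not the kernel's bit-scan implementation):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order such that 2^order pages cover 'size' bytes. */
static unsigned int get_order(unsigned long size)
{
    unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    unsigned int order = 0;

    while ((1UL << order) < pages)
        order++;
    return order;
}

int main(void)
{
    printf("get_order(4096)  = %u\n", get_order(4096));   /* 0: one page      */
    printf("get_order(12288) = %u\n", get_order(12288));  /* 2: needs 3 pages */
    printf("get_order(65536) = %u\n", get_order(65536));  /* 4: 16 pages      */
    return 0;
}
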
mempolicy.c
2143 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, in alloc_page_interleave() argument
2148 page = __alloc_pages(gfp, order, nid); in alloc_page_interleave()
2184 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, in alloc_pages_vma() argument
2197 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); in alloc_pages_vma()
2199 page = alloc_page_interleave(gfp, order, nid); in alloc_pages_vma()
2227 gfp | __GFP_THISNODE | __GFP_NORETRY, order); in alloc_pages_vma()
2236 page = __alloc_pages_nodemask(gfp, order, in alloc_pages_vma()
2245 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); in alloc_pages_vma()
2267 struct page *alloc_pages_current(gfp_t gfp, unsigned order) in alloc_pages_current() argument
2280 page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); in alloc_pages_current()
[all …]
memblock.c
1921 int order; in __free_pages_memory() local
1924 order = min(MAX_ORDER - 1UL, __ffs(start)); in __free_pages_memory()
1926 while (start + (1UL << order) > end) in __free_pages_memory()
1927 order--; in __free_pages_memory()
1929 memblock_free_pages(pfn_to_page(start), start, order); in __free_pages_memory()
1931 start += (1UL << order); in __free_pages_memory()
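
__free_pages_memory() in the memblock.c result releases an arbitrary pfn range to the page allocator as the largest possible aligned blocks: the candidate order is capped by the alignment of the start pfn (__ffs(start)) and then shrunk until the block fits before end. A userspace sketch of that chunking loop, assuming MAX_ORDER = 11 and using a compiler builtin in place of __ffs():

#include <stdio.h>

#define MAX_ORDER 11   /* assumption: common kernel default */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

/* Release [start, end) as the largest possible order-aligned blocks. */
static void free_pages_memory(unsigned long start, unsigned long end)
{
    while (start < end) {
        /* Largest order the alignment of 'start' allows (gcc/clang builtin
         * stands in for the kernel's __ffs()). */
        unsigned long order = start ?
            min_ul(MAX_ORDER - 1UL, (unsigned long)__builtin_ctzl(start)) :
            MAX_ORDER - 1UL;

        /* Shrink until the whole block fits before 'end'. */
        while (start + (1UL << order) > end)
            order--;

        printf("free block: pfn %lu, order %lu (%lu pages)\n",
               start, order, 1UL << order);
        start += 1UL << order;
    }
}

int main(void)
{
    free_pages_memory(3, 70);   /* a deliberately misaligned range */
    return 0;
}
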
memory_hotplug.c
592 void generic_online_page(struct page *page, unsigned int order) in generic_online_page() argument
599 debug_pagealloc_map_pages(page, 1 << order); in generic_online_page()
600 __free_pages_core(page, order); in generic_online_page()
601 totalram_pages_add(1UL << order); in generic_online_page()
604 totalhigh_pages_add(1UL << order); in generic_online_page()
/mm/kasan/
hw_tags.c
239 void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags) in kasan_alloc_pages() argument
253 for (i = 0; i != 1 << order; ++i) in kasan_alloc_pages()
256 kasan_unpoison_pages(page, order, init); in kasan_alloc_pages()
260 void kasan_free_pages(struct page *page, unsigned int order) in kasan_free_pages() argument
268 kasan_poison_pages(page, order, init); in kasan_free_pages()
common.c
100 void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init) in __kasan_unpoison_pages() argument
109 for (i = 0; i < (1 << order); i++) in __kasan_unpoison_pages()
111 kasan_unpoison(page_address(page), PAGE_SIZE << order, init); in __kasan_unpoison_pages()
114 void __kasan_poison_pages(struct page *page, unsigned int order, bool init) in __kasan_poison_pages() argument
117 kasan_poison(page_address(page), PAGE_SIZE << order, in __kasan_poison_pages()
