
Searched refs: order (Results 1 – 25 of 28), sorted by relevance


/mm/
compaction.c
48 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
49 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
71 unsigned int i, order, nr_pages; in split_map_pages() local
78 order = page_private(page); in split_map_pages()
79 nr_pages = 1 << order; in split_map_pages()
81 post_alloc_hook(page, order, __GFP_MOVABLE); in split_map_pages()
82 if (order) in split_map_pages()
83 split_page(page, order); in split_map_pages()
142 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
147 if (order < zone->compact_order_failed) in defer_compaction()
[all …]
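
The two macros at the top of compaction.c bound the 2^order-aligned block containing a given pfn. Below is a minimal userspace sketch of the same rounding arithmetic; round_down() and ALIGN() are kernel macros, restated here for power-of-two sizes, and the pfn and order values are purely illustrative.

#include <stdio.h>

/* Power-of-two rounding, equivalent to the kernel's round_down()
 * and ALIGN() for these uses. */
#define round_down(x, y) ((x) & ~((y) - 1))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)   ALIGN((pfn) + 1, 1UL << (order))

int main(void)
{
    unsigned long pfn = 1234567;
    unsigned int order = 9;   /* a typical pageblock order */

    /* start: pfn rounded down to a 2^order boundary;
     * end: the first pfn past that block */
    printf("block: [%lu, %lu)\n",
           block_start_pfn(pfn, order), block_end_pfn(pfn, order));
    return 0;
}

For pfn 1234567 and order 9 this prints [1234432, 1234944), the 512-page block that contains the pfn.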
page_alloc.c
248 static void __free_pages_ok(struct page *page, unsigned int order);
395 static inline void kasan_free_nondeferred_pages(struct page *page, int order) in kasan_free_nondeferred_pages() argument
398 kasan_free_pages(page, order); in kasan_free_nondeferred_pages()
689 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
692 int nr_pages = 1 << order; in prep_compound_page()
695 set_compound_order(page, order); in prep_compound_page()
751 unsigned int order, int migratetype) in set_page_guard() argument
756 if (order >= debug_guardpage_minorder()) in set_page_guard()
761 set_page_private(page, order); in set_page_guard()
763 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
[all …]
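
page_alloc.c leans on two conventions visible above: an order-n block covers 1 << n base pages, and for free or guard pages the order is stashed in the head page's private field (set_page_guard() above does exactly this with set_page_private()). A toy model of that round trip, not the real struct page:

#include <stdio.h>

/* Toy model only: the real struct page is far more involved. It
 * mirrors how page_alloc.c stores the order of a block in the head
 * page and derives the page count back from it. */
struct toy_page {
    unsigned long private;   /* order lives here for free/guard pages */
};

static void toy_set_page_order(struct toy_page *page, unsigned int order)
{
    page->private = order;   /* cf. set_page_private(page, order) */
}

int main(void)
{
    struct toy_page head = { 0 };

    toy_set_page_order(&head, 3);

    unsigned int order = (unsigned int)head.private; /* cf. page_private() */
    unsigned long nr_pages = 1UL << order;           /* 8 base pages */

    printf("order %u block spans %lu pages\n", order, nr_pages);
    return 0;
}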
page_owner.c
23 unsigned short order; member
142 void __reset_page_owner(struct page *page, unsigned int order) in __reset_page_owner() argument
154 for (i = 0; i < (1 << order); i++) { in __reset_page_owner()
164 unsigned int order, gfp_t gfp_mask) in __set_page_owner_handle() argument
169 for (i = 0; i < (1 << order); i++) { in __set_page_owner_handle()
172 page_owner->order = order; in __set_page_owner_handle()
182 noinline void __set_page_owner(struct page *page, unsigned int order, in __set_page_owner() argument
192 __set_page_owner_handle(page, page_ext, handle, order, gfp_mask); in __set_page_owner()
207 void __split_page_owner(struct page *page, unsigned int order) in __split_page_owner() argument
216 for (i = 0; i < (1 << order); i++) { in __split_page_owner()
[all …]
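
page_owner.c shows the standard pattern for per-base-page metadata of a high-order allocation: every one of the 1 << order constituent pages gets stamped, as in the __set_page_owner_handle() loop above, so any pfn in the block can be traced back. A toy sketch of that loop; the struct and field names here are illustrative, not the kernel's:

#include <stdio.h>

#define NPAGES 16

/* One record per base page; a high-order allocation stamps all
 * 1 << order of them, the way __set_page_owner_handle() does. */
struct toy_owner {
    unsigned short order;
    int            valid;
};

static struct toy_owner owners[NPAGES];

static void set_owner(unsigned long pfn, unsigned int order)
{
    for (unsigned long i = 0; i < (1UL << order); i++) {
        owners[pfn + i].order = order;  /* every base page knows the order */
        owners[pfn + i].valid = 1;
    }
}

int main(void)
{
    set_owner(4, 2);                                  /* order-2 block at pfn 4 */
    printf("pfn 6 -> order %u\n", owners[6].order);   /* prints 2 */
    return 0;
}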
slab.h
348 gfp_t gfp, int order, in memcg_charge_slab() argument
363 (1 << order)); in memcg_charge_slab()
364 percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order); in memcg_charge_slab()
368 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); in memcg_charge_slab()
373 mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order); in memcg_charge_slab()
376 percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order); in memcg_charge_slab()
377 css_put_many(&memcg->css, 1 << order); in memcg_charge_slab()
387 static __always_inline void memcg_uncharge_slab(struct page *page, int order, in memcg_uncharge_slab() argument
397 mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order)); in memcg_uncharge_slab()
398 memcg_kmem_uncharge_memcg(page, order, memcg); in memcg_uncharge_slab()
[all …]
vmstat.c
1024 unsigned int order; in fill_contig_page_info() local
1030 for (order = 0; order < MAX_ORDER; order++) { in fill_contig_page_info()
1034 blocks = zone->free_area[order].nr_free; in fill_contig_page_info()
1038 info->free_pages += blocks << order; in fill_contig_page_info()
1041 if (order >= suitable_order) in fill_contig_page_info()
1043 (order - suitable_order); in fill_contig_page_info()
1054 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) in __fragmentation_index() argument
1056 unsigned long requested = 1UL << order; in __fragmentation_index()
1058 if (WARN_ON_ONCE(order >= MAX_ORDER)) in __fragmentation_index()
1078 int fragmentation_index(struct zone *zone, unsigned int order) in fragmentation_index() argument
[all …]
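
fill_contig_page_info() tallies free pages, free blocks, and blocks already large enough for the request; __fragmentation_index() then scales the answer to three decimal places, where 0 means an allocation would fail for lack of memory and values near 1000 mean the memory exists but is fragmented. The final formula is not shown in the excerpt above; the sketch below restates it as in mainline vmstat.c, with div_u64() replaced by plain division, and the counters in main() are made up for illustration.

#include <stdio.h>

/* free_pages and free_blocks_total would come from walking
 * zone->free_area[] as fill_contig_page_info() does above. */
static int fragmentation_index_sketch(unsigned int order,
                                      unsigned long free_pages,
                                      unsigned long free_blocks_total,
                                      unsigned long free_blocks_suitable)
{
    unsigned long requested = 1UL << order;

    if (!free_blocks_total)
        return 0;

    /* A suitable block exists: the index is meaningless, flag it. */
    if (free_blocks_suitable)
        return -1000;

    /* 0 => failing for lack of memory,
     * 1000 => failing because free memory is fragmented. */
    return 1000 - (1000 + free_pages * 1000UL / requested) / free_blocks_total;
}

int main(void)
{
    /* 4096 free pages scattered across 1024 free blocks (4 pages each
     * on average), asked for an order-4 (16-page) block: index 750. */
    printf("index = %d\n", fragmentation_index_sketch(4, 4096, 1024, 0));
    return 0;
}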
slob.c
191 static void *slob_new_pages(gfp_t gfp, int order, int node) in slob_new_pages() argument
197 page = __alloc_pages_node(node, gfp, order); in slob_new_pages()
200 page = alloc_pages(gfp, order); in slob_new_pages()
206 1 << order); in slob_new_pages()
210 static void slob_free_pages(void *b, int order) in slob_free_pages() argument
215 current->reclaim_state->reclaimed_slab += 1 << order; in slob_free_pages()
218 -(1 << order)); in slob_free_pages()
219 __free_pages(sp, order); in slob_free_pages()
503 unsigned int order = get_order(size); in __do_kmalloc_node() local
505 if (likely(order)) in __do_kmalloc_node()
[all …]
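
slob.c (and vmalloc.c, nommu.c, and sparse-vmemmap.c below) all turn a byte size into an order with get_order(): the smallest order whose block, PAGE_SIZE << order, covers the size. A plain-C restatement follows; the PAGE_SHIFT of 12 (4 KiB pages) is an assumption, and the kernel's version is written to constant-fold rather than loop.

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static unsigned int get_order_sketch(unsigned long size)
{
    unsigned int order = 0;

    size = (size - 1) >> PAGE_SHIFT;
    while (size) {
        order++;
        size >>= 1;
    }
    return order;
}

int main(void)
{
    printf("%u %u %u\n",
           get_order_sketch(PAGE_SIZE),        /* 0 */
           get_order_sketch(PAGE_SIZE + 1),    /* 1 */
           get_order_sketch(10 * PAGE_SIZE));  /* 4: 16 pages cover 10 */
    return 0;
}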
internal.h
142 __find_buddy_pfn(unsigned long page_pfn, unsigned int order) in __find_buddy_pfn() argument
144 return page_pfn ^ (1 << order); in __find_buddy_pfn()
159 extern int __isolate_free_page(struct page *page, unsigned int order);
161 unsigned int order);
162 extern void __free_pages_core(struct page *page, unsigned int order);
163 extern void prep_compound_page(struct page *page, unsigned int order);
164 extern void post_alloc_hook(struct page *page, unsigned int order,
194 int order; /* order a direct compactor needs */ member
223 int find_suitable_fallback(struct free_area *area, unsigned int order,
480 unsigned int order) in node_reclaim() argument
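
internal.h carries the core buddy-allocator identity: a free 2^order block is always 2^order-aligned, so its buddy differs from it in exactly one pfn bit, and __find_buddy_pfn() above is a single XOR that works in both directions. A quick demonstration:

#include <stdio.h>

/* Same expression as __find_buddy_pfn() above: flip bit `order`
 * of the pfn to hop to the neighbouring 2^order block. */
static unsigned long find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
    return page_pfn ^ (1UL << order);
}

int main(void)
{
    /* The order-2 block at pfn 8 pairs with the one at pfn 12,
     * and the XOR is symmetric: each is the other's buddy. */
    printf("%lu\n", find_buddy_pfn(8, 2));   /* 12 */
    printf("%lu\n", find_buddy_pfn(12, 2));  /* 8 */
    return 0;
}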
shuffle.c
61 static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order) in shuffle_valid_page() argument
87 if (page_order(page) != order) in shuffle_valid_page()
110 const int order = SHUFFLE_ORDER; in __shuffle_zone() local
111 const int order_pages = 1 << order; in __shuffle_zone()
126 page_i = shuffle_valid_page(i, order); in __shuffle_zone()
140 page_j = shuffle_valid_page(j, order); in __shuffle_zone()
page_isolation.c
95 unsigned int order; in unset_migratetype_isolate() local
113 order = page_order(page); in unset_migratetype_isolate()
114 if (order >= pageblock_order) { in unset_migratetype_isolate()
116 buddy_pfn = __find_buddy_pfn(pfn, order); in unset_migratetype_isolate()
121 __isolate_free_page(page, order); in unset_migratetype_isolate()
141 post_alloc_hook(page, order, __GFP_MOVABLE); in unset_migratetype_isolate()
142 __free_pages(page, order); in unset_migratetype_isolate()
vmscan.c
105 s8 order; member
1764 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_pages()
2679 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in in_reclaim_compaction()
2680 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
2725 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { in should_continue_reclaim()
2739 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
2920 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); in compaction_ready()
2937 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
2991 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
3014 sc->order, sc->gfp_mask, in shrink_zones()
[all …]
memory_hotplug.c
52 static void generic_online_page(struct page *page, unsigned int order);
599 static void generic_online_page(struct page *page, unsigned int order) in generic_online_page() argument
601 kernel_map_pages(page, 1 << order, 1); in generic_online_page()
602 __free_pages_core(page, order); in generic_online_page()
603 totalram_pages_add(1UL << order); in generic_online_page()
606 totalhigh_pages_add(1UL << order); in generic_online_page()
615 int order; in online_pages_range() local
622 for (pfn = start_pfn; pfn < end_pfn; pfn += 1ul << order) { in online_pages_range()
623 order = min(MAX_ORDER - 1, get_order(PFN_PHYS(end_pfn - pfn))); in online_pages_range()
625 if (WARN_ON_ONCE(!IS_ALIGNED(pfn, 1ul << order))) in online_pages_range()
[all …]
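
online_pages_range() above hands newly onlined memory to the buddy allocator in the largest blocks it can: the order that spans the remaining pfns, capped at MAX_ORDER - 1, dropping to order 0 when the current pfn is not aligned for it. A sketch of that selection; MAX_ORDER of 11 is an assumption (the common default), and get_order() is applied to a page count directly rather than to PFN_PHYS() bytes. Hotplugged ranges are section-aligned in practice, which keeps the blocks from overshooting end_pfn.

#include <stdio.h>

#define MAX_ORDER 11   /* assumed; the common kernel default */

/* Smallest order such that 1 << order >= pages
 * (the get_order() sketch above, on a page count). */
static unsigned int order_for(unsigned long pages)
{
    unsigned int order = 0;

    for (pages -= 1; pages; pages >>= 1)
        order++;
    return order;
}

static void online_range_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
    unsigned long pfn;
    unsigned int order;

    for (pfn = start_pfn; pfn < end_pfn; pfn += 1UL << order) {
        order = order_for(end_pfn - pfn);
        if (order > MAX_ORDER - 1)
            order = MAX_ORDER - 1;
        if (pfn & ((1UL << order) - 1))   /* cf. the IS_ALIGNED() check */
            order = 0;
        printf("online pfn %lu as order %u\n", pfn, order);
    }
}

int main(void)
{
    online_range_sketch(0, 4096);   /* four order-10 blocks */
    return 0;
}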
mempool.c
66 int order = (int)(long)pool->pool_data; in check_element() local
69 __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); in check_element()
90 int order = (int)(long)pool->pool_data; in poison_element() local
93 __poison_element(addr, 1UL << (PAGE_SHIFT + order)); in poison_element()
547 int order = (int)(long)pool_data; in mempool_alloc_pages() local
548 return alloc_pages(gfp_mask, order); in mempool_alloc_pages()
554 int order = (int)(long)pool_data; in mempool_free_pages() local
555 __free_pages(element, order); in mempool_free_pages()
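
mempool.c's page pools need only a single integer of per-pool state, so the order is stored directly in the pool_data pointer rather than behind it: cast in when the pool is created, cast back out in each callback, exactly as in check_element() and mempool_alloc_pages() above. A sketch of the round trip:

#include <stdio.h>

/* The allocation order is smuggled through the void * itself. */
static void *make_pool_data(int order)
{
    return (void *)(long)order;
}

static void alloc_callback(void *pool_data)
{
    int order = (int)(long)pool_data;   /* cf. mempool_alloc_pages() */

    printf("would alloc_pages(gfp, %d): %d pages\n", order, 1 << order);
}

int main(void)
{
    void *pool_data = make_pool_data(2);

    alloc_callback(pool_data);   /* "would alloc_pages(gfp, 2): 4 pages" */
    return 0;
}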
slub.c
324 static inline unsigned int order_objects(unsigned int order, unsigned int size) in order_objects() argument
326 return ((unsigned int)PAGE_SIZE << order) / size; in order_objects()
329 static inline struct kmem_cache_order_objects oo_make(unsigned int order, in oo_make() argument
333 (order << OO_SHIFT) + order_objects(order, size) in oo_make()
1492 unsigned int order = oo_order(oo); in alloc_slab_page() local
1495 page = alloc_pages(flags, order); in alloc_slab_page()
1497 page = __alloc_pages_node(node, flags, order); in alloc_slab_page()
1499 if (page && charge_slab_page(page, flags, order, s)) { in alloc_slab_page()
1500 __free_pages(page, order); in alloc_slab_page()
1712 int order = compound_order(page); in __free_slab() local
[all …]
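
slub.c packs a slab's page order together with the number of objects a block of that order holds into one word (kmem_cache_order_objects): order in the bits above OO_SHIFT, object count below. A sketch of the encode/decode; OO_SHIFT of 16 matches slub.c, and the 4 KiB page size is an assumption.

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u
#define OO_SHIFT 16
#define OO_MASK  ((1u << OO_SHIFT) - 1)

/* How many objects of `size` bytes fit in an order-`order` block. */
static unsigned int order_objects(unsigned int order, unsigned int size)
{
    return (SKETCH_PAGE_SIZE << order) / size;
}

/* Pack order and object count into one word, as oo_make() does. */
static unsigned int oo_make(unsigned int order, unsigned int size)
{
    return (order << OO_SHIFT) + order_objects(order, size);
}

int main(void)
{
    unsigned int oo = oo_make(1, 192);   /* order-1 slab of 192-byte objects */

    printf("order=%u objects=%u\n", oo >> OO_SHIFT, oo & OO_MASK);
    /* order=1 objects=42: an 8192-byte slab holds 42 192-byte objects */
    return 0;
}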
shuffle.h
40 static inline bool is_shuffle_order(int order) in is_shuffle_order() argument
44 return order >= SHUFFLE_ORDER; in is_shuffle_order()
59 static inline bool is_shuffle_order(int order) in is_shuffle_order() argument
hugetlb.c
1051 unsigned int order) in destroy_compound_gigantic_page() argument
1054 int nr_pages = 1 << order; in destroy_compound_gigantic_page()
1067 static void free_gigantic_page(struct page *page, unsigned int order) in free_gigantic_page() argument
1069 free_contig_range(page_to_pfn(page), 1 << order); in free_gigantic_page()
1118 unsigned int order = huge_page_order(h); in alloc_gigantic_page() local
1119 unsigned long nr_pages = 1 << order; in alloc_gigantic_page()
1155 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1170 static inline void free_gigantic_page(struct page *page, unsigned int order) { } in free_gigantic_page() argument
1172 unsigned int order) { } in destroy_compound_gigantic_page() argument
1381 static void prep_compound_gigantic_page(struct page *page, unsigned int order) in prep_compound_gigantic_page() argument
[all …]
sparse-vmemmap.c
54 int order = get_order(size); in vmemmap_alloc_block() local
58 page = alloc_pages_node(node, gfp_mask, order); in vmemmap_alloc_block()
64 "vmemmap alloc failure: order:%u", order); in vmemmap_alloc_block()
vmalloc.c
1456 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) in new_vmap_block() argument
1491 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); in new_vmap_block()
1492 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block()
1577 unsigned int order; in vb_alloc() local
1589 order = get_order(size); in vb_alloc()
1597 if (vb->free < (1UL << order)) { in vb_alloc()
1604 vb->free -= 1UL << order; in vb_alloc()
1620 vaddr = new_vmap_block(order, gfp_mask); in vb_alloc()
1629 unsigned int order; in vb_free() local
1637 order = get_order(size); in vb_free()
[all …]
oom_kill.c
157 return oc->order == -1; in is_sysrq_oom()
455 current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order, in dump_header()
457 if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) in dump_header()
1128 .order = 0, in pagefault_out_of_memory()
mempolicy.c
2058 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, in alloc_page_interleave() argument
2063 page = __alloc_pages(gfp, order, nid); in alloc_page_interleave()
2099 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, in alloc_pages_vma() argument
2112 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); in alloc_pages_vma()
2114 page = alloc_page_interleave(gfp, order, nid); in alloc_pages_vma()
2138 gfp | __GFP_THISNODE, order); in alloc_pages_vma()
2148 gfp | __GFP_NORETRY, order); in alloc_pages_vma()
2156 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); in alloc_pages_vma()
2178 struct page *alloc_pages_current(gfp_t gfp, unsigned order) in alloc_pages_current() argument
2191 page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); in alloc_pages_current()
[all …]
slab_common.c
1311 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) in kmalloc_order() argument
1317 page = alloc_pages(flags, order); in kmalloc_order()
1321 1 << order); in kmalloc_order()
1331 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) in kmalloc_order_trace() argument
1333 void *ret = kmalloc_order(size, flags, order); in kmalloc_order_trace()
1334 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); in kmalloc_order_trace()
memblock.c
1894 int order; in __free_pages_memory() local
1897 order = min(MAX_ORDER - 1UL, __ffs(start)); in __free_pages_memory()
1899 while (start + (1UL << order) > end) in __free_pages_memory()
1900 order--; in __free_pages_memory()
1902 memblock_free_pages(pfn_to_page(start), start, order); in __free_pages_memory()
1904 start += (1UL << order); in __free_pages_memory()
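
__free_pages_memory() above releases an arbitrary pfn range to the buddy allocator by greedily carving it into maximal naturally aligned power-of-two blocks: the alignment of start (its lowest set bit, __ffs()) proposes an order, which is shrunk until the block also fits before end. A userspace sketch, with MAX_ORDER of 11 assumed as above and __ffs() rendered as __builtin_ctzl():

#include <stdio.h>

#define MAX_ORDER 11   /* assumed; the common kernel default */

static void free_range_sketch(unsigned long start, unsigned long end)
{
    while (start < end) {
        /* Largest order the current alignment allows... */
        unsigned long order = start ? __builtin_ctzl(start) : MAX_ORDER - 1UL;

        if (order > MAX_ORDER - 1UL)
            order = MAX_ORDER - 1UL;

        /* ...shrunk until the block also fits before `end`. */
        while (start + (1UL << order) > end)
            order--;

        printf("block: pfn %lu, order %lu (%lu pages)\n",
               start, order, 1UL << order);
        start += 1UL << order;
    }
}

int main(void)
{
    free_range_sketch(3, 17);   /* pfns 3..16: blocks of order 0, 2, 3, 0 */
    return 0;
}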
memcontrol.c
1576 int order) in mem_cgroup_out_of_memory() argument
1583 .order = order, in mem_cgroup_out_of_memory()
1878 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) in mem_cgroup_oom() argument
1883 if (order > PAGE_ALLOC_COSTLY_ORDER) in mem_cgroup_oom()
1912 current->memcg_oom_order = order; in mem_cgroup_oom()
1925 if (mem_cgroup_out_of_memory(memcg, mask, order)) in mem_cgroup_oom()
2947 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, in __memcg_kmem_charge_memcg() argument
2950 unsigned int nr_pages = 1 << order; in __memcg_kmem_charge_memcg()
2984 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) in __memcg_kmem_charge() argument
2994 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); in __memcg_kmem_charge()
[all …]
madvise.c
867 unsigned int order; in madvise_inject_error() local
873 for (; start < end; start += PAGE_SIZE << order) { in madvise_inject_error()
887 order = compound_order(compound_head(page)); in madvise_inject_error()
nommu.c
1007 int ret, order; in do_mmap_private() local
1034 order = get_order(len); in do_mmap_private()
1035 total = 1 << order; in do_mmap_private()
/mm/kasan/
common.c
214 void kasan_alloc_pages(struct page *page, unsigned int order) in kasan_alloc_pages() argument
223 for (i = 0; i < (1 << order); i++) in kasan_alloc_pages()
225 kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); in kasan_alloc_pages()
228 void kasan_free_pages(struct page *page, unsigned int order) in kasan_free_pages() argument
232 PAGE_SIZE << order, in kasan_free_pages()
