Searched refs:order (Results 1 – 25 of 25) sorted by relevance

/mm/
page_alloc.c
193 static void __free_pages_ok(struct page *page, unsigned int order);
615 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
618 int nr_pages = 1 << order; in prep_compound_page()
621 set_compound_order(page, order); in prep_compound_page()
690 unsigned int order, int migratetype) in set_page_guard() argument
697 if (order >= debug_guardpage_minorder()) in set_page_guard()
707 set_page_private(page, order); in set_page_guard()
709 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
715 unsigned int order, int migratetype) in clear_page_guard() argument
730 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
[all …]
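In the page_alloc.c hits above, "order" is the buddy-allocator allocation order: an order-n block is 2^n contiguous pages, which is why prep_compound_page() computes nr_pages = 1 << order. A minimal sketch of that relationship (the helper names are illustrative, not taken from page_alloc.c):

    /* An order-n block covers 2^n contiguous pages. */
    static unsigned long order_to_pages(unsigned int order)
    {
            return 1UL << order;            /* e.g. order 3 -> 8 pages */
    }

    static unsigned long order_to_bytes(unsigned int order)
    {
            return PAGE_SIZE << order;      /* order 3 with 4 KiB pages -> 32 KiB */
    }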
compaction.c
48 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
49 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
71 unsigned int i, order, nr_pages; in map_pages() local
78 order = page_private(page); in map_pages()
79 nr_pages = 1 << order; in map_pages()
81 post_alloc_hook(page, order, __GFP_MOVABLE); in map_pages()
82 if (order) in map_pages()
83 split_page(page, order); in map_pages()
142 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
147 if (order < zone->compact_order_failed) in defer_compaction()
[all …]
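block_start_pfn() and block_end_pfn() above round a pfn down/up to a 2^order boundary so the compaction scanners can walk memory one aligned block at a time. A small worked example of that arithmetic (the pfn and order values are made up for illustration):

    /* With order = 9 (512-page blocks), pfn 1000 falls in block [512, 1024). */
    unsigned long order = 9, pfn = 1000;
    unsigned long start = round_down(pfn, 1UL << order);   /* 512  */
    unsigned long end   = ALIGN(pfn + 1, 1UL << order);    /* 1024 */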
page_owner.c
23 unsigned int order; member
110 void __reset_page_owner(struct page *page, unsigned int order) in __reset_page_owner() argument
115 for (i = 0; i < (1 << order); i++) { in __reset_page_owner()
173 depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask) in __set_page_owner_handle() argument
179 page_owner->order = order; in __set_page_owner_handle()
186 noinline void __set_page_owner(struct page *page, unsigned int order, in __set_page_owner() argument
196 __set_page_owner_handle(page_ext, handle, order, gfp_mask); in __set_page_owner()
211 void __split_page_owner(struct page *page, unsigned int order) in __split_page_owner() argument
221 page_owner->order = 0; in __split_page_owner()
222 for (i = 1; i < (1 << order); i++) in __split_page_owner()
[all …]
vmstat.c
952 unsigned int order; in fill_contig_page_info() local
958 for (order = 0; order < MAX_ORDER; order++) { in fill_contig_page_info()
962 blocks = zone->free_area[order].nr_free; in fill_contig_page_info()
966 info->free_pages += blocks << order; in fill_contig_page_info()
969 if (order >= suitable_order) in fill_contig_page_info()
971 (order - suitable_order); in fill_contig_page_info()
982 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) in __fragmentation_index() argument
984 unsigned long requested = 1UL << order; in __fragmentation_index()
986 if (WARN_ON_ONCE(order >= MAX_ORDER)) in __fragmentation_index()
1006 int fragmentation_index(struct zone *zone, unsigned int order) in fragmentation_index() argument
[all …]
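fill_contig_page_info() above walks zone->free_area[] and, per order, counts free blocks, total free pages, and how many free blocks are already large enough for the requested (suitable) order; __fragmentation_index() then turns those counts into a 0–1000 score. A hedged sketch of the counting step, with a plain nr_free[] array standing in for zone->free_area[order].nr_free:

    unsigned long free_pages = 0, free_blocks_suitable = 0;
    unsigned int order;

    for (order = 0; order < MAX_ORDER; order++) {
            unsigned long blocks = nr_free[order];

            free_pages += blocks << order;
            if (order >= suitable_order)
                    free_blocks_suitable += blocks << (order - suitable_order);
    }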
internal.h
146 __find_buddy_pfn(unsigned long page_pfn, unsigned int order) in __find_buddy_pfn() argument
148 return page_pfn ^ (1 << order); in __find_buddy_pfn()
163 extern int __isolate_free_page(struct page *page, unsigned int order);
165 unsigned int order);
166 extern void prep_compound_page(struct page *page, unsigned int order);
167 extern void post_alloc_hook(struct page *page, unsigned int order,
195 int order; /* order a direct compactor needs */ member
214 int find_suitable_fallback(struct free_area *area, unsigned int order,
462 unsigned int order) in node_reclaim() argument
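__find_buddy_pfn() above finds a block's buddy by XOR-ing bit `order` of the pfn: the two order-n halves of an order-(n+1) block differ only in that bit, so the XOR toggles between them. A tiny worked example (values chosen for illustration):

    /* At order 3 (8-page blocks), pfn 0 and pfn 8 are buddies. */
    unsigned long buddy = 8UL ^ (1UL << 3);   /* 0 */
    unsigned long back  = 0UL ^ (1UL << 3);   /* 8 */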
page_isolation.c
88 unsigned int order; in unset_migratetype_isolate() local
106 order = page_order(page); in unset_migratetype_isolate()
107 if (order >= pageblock_order) { in unset_migratetype_isolate()
109 buddy_pfn = __find_buddy_pfn(pfn, order); in unset_migratetype_isolate()
114 __isolate_free_page(page, order); in unset_migratetype_isolate()
134 post_alloc_hook(page, order, __GFP_MOVABLE); in unset_migratetype_isolate()
135 __free_pages(page, order); in unset_migratetype_isolate()
vmscan.c
73 int order; member
1604 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_pages()
2523 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in in_reclaim_compaction()
2524 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
2578 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
2592 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { in should_continue_reclaim()
2716 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); in compaction_ready()
2733 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
2787 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
2810 sc->order, sc->gfp_mask, in shrink_zones()
[all …]
slob.c
191 static void *slob_new_pages(gfp_t gfp, int order, int node) in slob_new_pages() argument
197 page = __alloc_pages_node(node, gfp, order); in slob_new_pages()
200 page = alloc_pages(gfp, order); in slob_new_pages()
208 static void slob_free_pages(void *b, int order) in slob_free_pages() argument
211 current->reclaim_state->reclaimed_slab += 1 << order; in slob_free_pages()
212 free_pages((unsigned long)b, order); in slob_free_pages()
453 unsigned int order = get_order(size); in __do_kmalloc_node() local
455 if (likely(order)) in __do_kmalloc_node()
457 ret = slob_new_pages(gfp, order, node); in __do_kmalloc_node()
460 size, PAGE_SIZE << order, gfp, node); in __do_kmalloc_node()
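In the slob.c hits, get_order(size) picks the smallest order whose 2^order pages cover size, so large kmalloc() requests go straight to the page allocator. A few hedged examples, assuming a 4 KiB PAGE_SIZE:

    get_order(4096);     /* 0: one page   */
    get_order(4097);     /* 1: two pages  */
    get_order(16384);    /* 2: four pages */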
mempool.c
66 int order = (int)(long)pool->pool_data; in check_element() local
69 __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); in check_element()
90 int order = (int)(long)pool->pool_data; in poison_element() local
93 __poison_element(addr, 1UL << (PAGE_SHIFT + order)); in poison_element()
484 int order = (int)(long)pool_data; in mempool_alloc_pages() local
485 return alloc_pages(gfp_mask, order); in mempool_alloc_pages()
491 int order = (int)(long)pool_data; in mempool_free_pages() local
492 __free_pages(element, order); in mempool_free_pages()
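For page-backed mempools, the allocation order is stored directly in pool->pool_data as a casted integer, which is why mempool_alloc_pages() and mempool_free_pages() cast it back. A hedged usage sketch of that pattern (essentially what mempool_create_page_pool() sets up):

    /* Keep a reserve of 16 order-2 (four-page) allocations. */
    mempool_t *pool = mempool_create(16, mempool_alloc_pages,
                                     mempool_free_pages, (void *)(long)2);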
nobootmem.c
101 int order; in __free_pages_memory() local
104 order = min(MAX_ORDER - 1UL, __ffs(start)); in __free_pages_memory()
106 while (start + (1UL << order) > end) in __free_pages_memory()
107 order--; in __free_pages_memory()
109 __free_pages_bootmem(pfn_to_page(start), start, order); in __free_pages_memory()
111 start += (1UL << order); in __free_pages_memory()
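__free_pages_memory() above hands a pfn range to the buddy allocator in the largest chunks it can: __ffs(start) gives the power-of-two alignment of the start pfn, capped at MAX_ORDER - 1, and the order is shrunk until the chunk fits before end. A worked example with made-up values:

    unsigned long start = 0x130, end = 0x13a;        /* 10 pages remaining */
    int order = min(MAX_ORDER - 1UL, __ffs(start));  /* __ffs(0x130) == 4  */
    while (start + (1UL << order) > end)
            order--;                                  /* ends at order 3    */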
slub.c
317 static inline int order_objects(int order, unsigned long size, int reserved) in order_objects() argument
319 return ((PAGE_SIZE << order) - reserved) / size; in order_objects()
322 static inline struct kmem_cache_order_objects oo_make(int order, in oo_make() argument
326 (order << OO_SHIFT) + order_objects(order, size, reserved) in oo_make()
1434 int order = oo_order(oo); in alloc_slab_page() local
1437 page = alloc_pages(flags, order); in alloc_slab_page()
1439 page = __alloc_pages_node(node, flags, order); in alloc_slab_page()
1441 if (page && memcg_charge_slab(page, flags, order, s)) { in alloc_slab_page()
1442 __free_pages(page, order); in alloc_slab_page()
1562 int idx, order; in allocate_slab() local
[all …]
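SLUB packs a slab's page order and its object count into one kmem_cache_order_objects word: oo_make() above shifts the order up by OO_SHIFT and puts order_objects() in the low bits. A hedged sketch of the unpacking side (OO_SHIFT is 16 in this slub.c; treat the exact constants as assumptions):

    #define OO_SHIFT 16
    #define OO_MASK  ((1 << OO_SHIFT) - 1)

    static inline int oo_order(struct kmem_cache_order_objects x)
    {
            return x.x >> OO_SHIFT;      /* slab page order  */
    }

    static inline int oo_objects(struct kmem_cache_order_objects x)
    {
            return x.x & OO_MASK;        /* objects per slab */
    }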
slab.h
274 gfp_t gfp, int order, in memcg_charge_slab() argument
281 return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg); in memcg_charge_slab()
284 static __always_inline void memcg_uncharge_slab(struct page *page, int order, in memcg_uncharge_slab() argument
289 memcg_kmem_uncharge(page, order); in memcg_uncharge_slab()
333 static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order, in memcg_charge_slab() argument
339 static inline void memcg_uncharge_slab(struct page *page, int order, in memcg_uncharge_slab() argument
hugetlb.c
1048 unsigned int order) in destroy_compound_gigantic_page() argument
1051 int nr_pages = 1 << order; in destroy_compound_gigantic_page()
1064 static void free_gigantic_page(struct page *page, unsigned int order) in free_gigantic_page() argument
1066 free_contig_range(page_to_pfn(page), 1 << order); in free_gigantic_page()
1113 unsigned int order = huge_page_order(h); in alloc_gigantic_page() local
1114 unsigned long nr_pages = 1 << order; in alloc_gigantic_page()
1152 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1184 static inline void free_gigantic_page(struct page *page, unsigned int order) { } in free_gigantic_page() argument
1186 unsigned int order) { } in destroy_compound_gigantic_page() argument
1323 static void prep_compound_gigantic_page(struct page *page, unsigned int order) in prep_compound_gigantic_page() argument
[all …]
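For gigantic pages, huge_page_order(h) is the hstate's order and free_gigantic_page() returns 1 << order contiguous pages to the allocator. As a worked example (an assumed configuration, not taken from the listing), a 1 GiB gigantic page with 4 KiB base pages is order 18:

    unsigned int  order = 18;               /* huge_page_order(h)      */
    unsigned long nr    = 1UL << order;     /* 262144 base pages       */
    unsigned long bytes = nr << 12;         /* 262144 * 4 KiB = 1 GiB  */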
vmalloc.c
866 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) in new_vmap_block() argument
901 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); in new_vmap_block()
902 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block()
987 unsigned int order; in vb_alloc() local
999 order = get_order(size); in vb_alloc()
1007 if (vb->free < (1UL << order)) { in vb_alloc()
1014 vb->free -= 1UL << order; in vb_alloc()
1030 vaddr = new_vmap_block(order, gfp_mask); in vb_alloc()
1039 unsigned int order; in vb_free() local
1047 order = get_order(size); in vb_free()
[all …]
oom_kill.c
136 return oc->order == -1; in is_sysrq_oom()
417 oc->order, current->signal->oom_score_adj); in dump_header()
418 if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) in dump_header()
1107 .order = 0, in pagefault_out_of_memory()
mempolicy.c
1965 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, in alloc_page_interleave() argument
1970 page = __alloc_pages(gfp, order, nid); in alloc_page_interleave()
2003 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, in alloc_pages_vma() argument
2016 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); in alloc_pages_vma()
2018 page = alloc_page_interleave(gfp, order, nid); in alloc_pages_vma()
2071 page = __alloc_pages_node(hpage_node, gfp, order); in alloc_pages_vma()
2078 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); in alloc_pages_vma()
2099 struct page *alloc_pages_current(gfp_t gfp, unsigned order) in alloc_pages_current() argument
2112 page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); in alloc_pages_current()
2114 page = __alloc_pages_nodemask(gfp, order, in alloc_pages_current()
slab_common.c
1120 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) in kmalloc_order() argument
1126 page = alloc_pages(flags, order); in kmalloc_order()
1135 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) in kmalloc_order_trace() argument
1137 void *ret = kmalloc_order(size, flags, order); in kmalloc_order_trace()
1138 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); in kmalloc_order_trace()
madvise.c
627 unsigned int order; in madvise_inject_error() local
633 for (; start < end; start += PAGE_SIZE << order) { in madvise_inject_error()
645 order = compound_order(compound_head(page)); in madvise_inject_error()
memory_hotplug.c
1224 int order; in next_active_pageblock() local
1226 order = page_order(page); in next_active_pageblock()
1227 if ((order < MAX_ORDER) && (order >= pageblock_order)) in next_active_pageblock()
1228 return page + (1 << order); in next_active_pageblock()
memcontrol.c
1262 int order) in mem_cgroup_out_of_memory() argument
1269 .order = order, in mem_cgroup_out_of_memory()
1548 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) in mem_cgroup_oom() argument
1569 current->memcg_oom_order = order; in mem_cgroup_oom()
2342 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, in memcg_kmem_charge_memcg() argument
2345 unsigned int nr_pages = 1 << order; in memcg_kmem_charge_memcg()
2382 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) in memcg_kmem_charge() argument
2392 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); in memcg_kmem_charge()
2404 void memcg_kmem_uncharge(struct page *page, int order) in memcg_kmem_uncharge() argument
2407 unsigned int nr_pages = 1 << order; in memcg_kmem_uncharge()
[all …]
bootmem.c
209 int order = ilog2(BITS_PER_LONG); in free_all_bootmem_core() local
211 __free_pages_bootmem(pfn_to_page(start), start, order); in free_all_bootmem_core()
nommu.c
1120 int ret, order; in do_mmap_private() local
1147 order = get_order(len); in do_mmap_private()
1148 total = 1 << order; in do_mmap_private()
Kconfig
239 high order (larger physically contiguous) memory blocks
242 invocations for high order memory requests. You shouldn't
596 in order to reduce fragmentation. However, this results in a
598 returned by an alloc(). This handle must be mapped in order to
slab.c
1449 int order = cachep->gfporder; in kmem_freepages() local
1450 unsigned long nr_freed = (1 << order); in kmem_freepages()
1465 memcg_uncharge_slab(page, order, cachep); in kmem_freepages()
1466 __free_pages(page, order); in kmem_freepages()
/mm/kasan/
kasan.c
308 void kasan_alloc_pages(struct page *page, unsigned int order) in kasan_alloc_pages() argument
311 kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); in kasan_alloc_pages()
314 void kasan_free_pages(struct page *page, unsigned int order) in kasan_free_pages() argument
318 PAGE_SIZE << order, in kasan_free_pages()