/mm/ |
D | page_alloc.c |
    187  static void __free_pages_ok(struct page *page, unsigned int order);
    507  void prep_compound_page(struct page *page, unsigned int order)    in prep_compound_page() argument
    510  int nr_pages = 1 << order;    in prep_compound_page()
    513  set_compound_order(page, order);    in prep_compound_page()
    576  unsigned int order, int migratetype)    in set_page_guard() argument
    590  set_page_private(page, order);    in set_page_guard()
    592  __mod_zone_freepage_state(zone, -(1 << order), migratetype);    in set_page_guard()
    596  unsigned int order, int migratetype)    in clear_page_guard() argument
    611  __mod_zone_freepage_state(zone, (1 << order), migratetype);    in clear_page_guard()
    616  unsigned int order, int migratetype) {}    in set_page_guard() argument
    [all …]
|
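The page_alloc.c hits above all turn on the same arithmetic: an order-n block is 1 << order contiguous pages, and prep_compound_page() stamps that order onto the head page of a compound allocation. A minimal userspace sketch of the size relation; the 4 KiB page size (PAGE_SHIFT 12) is an assumption, not something the listing states:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned int order;

        for (order = 0; order <= 10; order++) {
                unsigned long nr_pages = 1UL << order;      /* as in prep_compound_page() */
                unsigned long bytes = PAGE_SIZE << order;   /* bytes covered by the block */

                printf("order %2u -> %4lu pages, %8lu KiB\n",
                       order, nr_pages, bytes >> 10);
        }
        return 0;
}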
D | compaction.c |
    127  void defer_compaction(struct zone *zone, int order)    in defer_compaction() argument
    132  if (order < zone->compact_order_failed)    in defer_compaction()
    133  zone->compact_order_failed = order;    in defer_compaction()
    138  trace_mm_compaction_defer_compaction(zone, order);    in defer_compaction()
    142  bool compaction_deferred(struct zone *zone, int order)    in compaction_deferred() argument
    146  if (order < zone->compact_order_failed)    in compaction_deferred()
    156  trace_mm_compaction_deferred(zone, order);    in compaction_deferred()
    166  void compaction_defer_reset(struct zone *zone, int order,    in compaction_defer_reset() argument
    173  if (order >= zone->compact_order_failed)    in compaction_defer_reset()
    174  zone->compact_order_failed = order + 1;    in compaction_defer_reset()
    [all …]
|
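The compaction.c hits outline the deferral heuristic: each zone remembers the lowest order at which compaction failed (compact_order_failed), requests below that order are never deferred, repeated failures back off exponentially, and a success re-arms compaction by raising the threshold to order + 1. The sketch below models that bookkeeping in userspace; the zone_defer struct and the cap of 6 on the defer shift are assumptions made for illustration, not a copy of the kernel code:

#include <stdio.h>

/* Hedged model of the per-zone deferral counters; in the kernel the fields
 * live in struct zone.                                                      */
struct zone_defer {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
};

#define MAX_DEFER_SHIFT 6       /* assumed cap on the back-off exponent */

/* Compaction failed for 'order': back off, and remember the failing order. */
static void defer_compaction(struct zone_defer *z, int order)
{
        z->compact_considered = 0;
        if (z->compact_defer_shift < MAX_DEFER_SHIFT)
                z->compact_defer_shift++;
        if (order < z->compact_order_failed)
                z->compact_order_failed = order;
}

/* Should a request of this order skip compaction for now? */
static int compaction_deferred(struct zone_defer *z, int order)
{
        unsigned int defer_limit = 1U << z->compact_defer_shift;

        if (order < z->compact_order_failed)
                return 0;                       /* smaller orders never failed */
        if (++z->compact_considered >= defer_limit)
                return 0;                       /* back-off window has expired */
        return 1;
}

/* Compaction succeeded at 'order': re-arm it for that order and below. */
static void compaction_defer_reset(struct zone_defer *z, int order)
{
        z->compact_considered = 0;
        z->compact_defer_shift = 0;
        if (order >= z->compact_order_failed)
                z->compact_order_failed = order + 1;
}

int main(void)
{
        struct zone_defer z = { 0, 0, 64 };     /* nothing has failed yet */

        defer_compaction(&z, 9);
        printf("order 9 deferred: %d\n", compaction_deferred(&z, 9));   /* 1 */
        printf("order 3 deferred: %d\n", compaction_deferred(&z, 3));   /* 0 */
        compaction_defer_reset(&z, 9);
        printf("order 9 deferred after success: %d\n",
               compaction_deferred(&z, 9));                             /* 0 */
        return 0;
}

The useful property is that a failed order-9 request does not stop an order-3 request from still attempting compaction.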
D | vmstat.c |
    640  unsigned int order;    in fill_contig_page_info() local
    646  for (order = 0; order < MAX_ORDER; order++) {    in fill_contig_page_info()
    650  blocks = zone->free_area[order].nr_free;    in fill_contig_page_info()
    654  info->free_pages += blocks << order;    in fill_contig_page_info()
    657  if (order >= suitable_order)    in fill_contig_page_info()
    659  (order - suitable_order);    in fill_contig_page_info()
    670  static int __fragmentation_index(unsigned int order, struct contig_page_info *info)    in __fragmentation_index() argument
    672  unsigned long requested = 1UL << order;    in __fragmentation_index()
    691  int fragmentation_index(struct zone *zone, unsigned int order)    in fragmentation_index() argument
    695  fill_contig_page_info(zone, order, &info);    in fragmentation_index()
    [all …]
|
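fill_contig_page_info() walks free_area[] totalling free pages, free blocks, and blocks large enough for the request (an order >= suitable_order block counts as 1 << (order - suitable_order) suitable chunks), and __fragmentation_index() maps that onto a 0-1000 scale: values near 0 mean the allocation would fail for lack of memory, values near 1000 mean it would fail because the free memory is fragmented. A self-contained sketch with made-up free-list counts; the rounding is meant to mirror the upstream helper but should be read as an approximation:

#include <stdio.h>

#define MAX_ORDER 11

struct contig_page_info {
        unsigned long free_pages;               /* total free pages            */
        unsigned long free_blocks_total;        /* free blocks of any order    */
        unsigned long free_blocks_suitable;     /* blocks big enough for order */
};

static void fill_contig_page_info(const unsigned long *nr_free,
                                  unsigned int suitable_order,
                                  struct contig_page_info *info)
{
        unsigned int order;

        info->free_pages = 0;
        info->free_blocks_total = 0;
        info->free_blocks_suitable = 0;

        for (order = 0; order < MAX_ORDER; order++) {
                unsigned long blocks = nr_free[order];

                info->free_blocks_total += blocks;
                info->free_pages += blocks << order;
                if (order >= suitable_order)
                        info->free_blocks_suitable +=
                                blocks << (order - suitable_order);
        }
}

/* ~0: would fail for lack of memory; ~1000: would fail due to fragmentation;
 * -1000: a suitable block exists, so the index is not meaningful.            */
static int fragmentation_index(unsigned int order,
                               const struct contig_page_info *info)
{
        unsigned long requested = 1UL << order;

        if (!info->free_blocks_total)
                return 0;
        if (info->free_blocks_suitable)
                return -1000;
        return 1000 - (1000 + info->free_pages * 1000 / requested)
                        / info->free_blocks_total;
}

int main(void)
{
        /* Made-up free lists: lots of small pages, nothing contiguous. */
        unsigned long nr_free[MAX_ORDER] = { 4096, 128, 0 };
        struct contig_page_info info;

        fill_contig_page_info(nr_free, 4, &info);
        printf("fragmentation index for an order-4 request: %d\n",
               fragmentation_index(4, &info));          /* prints 936 */
        return 0;
}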
D | vmscan.c |
    69  int order;    member
    1415  trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,    in isolate_lru_pages()
    2310  if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&    in in_reclaim_compaction()
    2311  (sc->order > PAGE_ALLOC_COSTLY_ORDER ||    in in_reclaim_compaction()
    2364  pages_for_compaction = (2UL << sc->order);    in should_continue_reclaim()
    2373  switch (compaction_suitable(zone, sc->order, 0, 0)) {    in should_continue_reclaim()
    2474  static inline bool compaction_ready(struct zone *zone, int order)    in compaction_ready() argument
    2487  watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);    in compaction_ready()
    2494  if (compaction_deferred(zone, order))    in compaction_ready()
    2501  if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED)    in compaction_ready()
    [all …]
|
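A quick worked number for the 2UL << order headroom seen in should_continue_reclaim() and compaction_ready(): reclaim budgets roughly twice the requested block before compaction is expected to succeed. Assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
        unsigned int order = 9;                 /* e.g. a 2 MiB THP with 4 KiB pages */
        unsigned long pages_for_compaction = 2UL << order;

        printf("order %u: %lu pages of headroom (%lu KiB)\n",
               order, pages_for_compaction, pages_for_compaction * 4);
        return 0;
}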
D | kmemcheck.c |
    8  void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)    in kmemcheck_alloc_shadow() argument
    14  pages = 1 << order;    in kmemcheck_alloc_shadow()
    20  shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);    in kmemcheck_alloc_shadow()
    38  void kmemcheck_free_shadow(struct page *page, int order)    in kmemcheck_free_shadow() argument
    47  pages = 1 << order;    in kmemcheck_free_shadow()
    56  __free_pages(shadow, order);    in kmemcheck_free_shadow()
    99  void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,    in kmemcheck_pagealloc_alloc() argument
    107  pages = 1 << order;    in kmemcheck_pagealloc_alloc()
    116  kmemcheck_alloc_shadow(page, order, gfpflags, -1);    in kmemcheck_pagealloc_alloc()
|
D | page_isolation.c |
    81  unsigned int order;    in unset_migratetype_isolate() local
    99  order = page_order(page);    in unset_migratetype_isolate()
    100  if (order >= pageblock_order) {    in unset_migratetype_isolate()
    102  buddy_idx = __find_buddy_index(page_idx, order);    in unset_migratetype_isolate()
    107  __isolate_free_page(page, order);    in unset_migratetype_isolate()
    108  kernel_map_pages(page, (1 << order), 1);    in unset_migratetype_isolate()
    129  __free_pages(isolated_page, order);    in unset_migratetype_isolate()
|
D | internal.h |
    197  __find_buddy_index(unsigned long page_idx, unsigned int order)    in __find_buddy_index() argument
    199  return page_idx ^ (1 << order);    in __find_buddy_index()
    202  extern int __isolate_free_page(struct page *page, unsigned int order);
    204  unsigned int order);
    205  extern void prep_compound_page(struct page *page, unsigned int order);
    233  int order; /* order a direct compactor needs */    member
    250  int find_suitable_fallback(struct free_area *area, unsigned int order,
|
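__find_buddy_index() is the core buddy-allocator trick visible in these hits: two order-n buddies differ only in bit 'order' of their page index, so XOR with 1 << order jumps from one to the other, and ANDing the two indices gives the start of the merged order-(n+1) block. A small standalone demonstration:

#include <stdio.h>

static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
        return page_idx ^ (1UL << order);       /* flip bit 'order' of the index */
}

int main(void)
{
        unsigned long idx = 8, buddy;
        unsigned int order = 3;

        buddy = find_buddy_index(idx, order);
        printf("buddy of page %lu at order %u is page %lu\n", idx, order, buddy);
        printf("merged order-%u block starts at page %lu\n",
               order + 1, idx & buddy);         /* lower of the two indices */
        return 0;
}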
D | slob.c |
    190  static void *slob_new_pages(gfp_t gfp, int order, int node)    in slob_new_pages() argument
    196  page = __alloc_pages_node(node, gfp, order);    in slob_new_pages()
    199  page = alloc_pages(gfp, order);    in slob_new_pages()
    207  static void slob_free_pages(void *b, int order)    in slob_free_pages() argument
    210  current->reclaim_state->reclaimed_slab += 1 << order;    in slob_free_pages()
    211  free_pages((unsigned long)b, order);    in slob_free_pages()
    451  unsigned int order = get_order(size);    in __do_kmalloc_node() local
    453  if (likely(order))    in __do_kmalloc_node()
    455  ret = slob_new_pages(gfp, order, node);    in __do_kmalloc_node()
    458  size, PAGE_SIZE << order, gfp, node);    in __do_kmalloc_node()
|
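slob hands any request that needs a whole page or more to the page allocator, using get_order(size) to find the smallest order whose block (PAGE_SIZE << order) holds size bytes. The kernel helper lives in asm-generic; the version below is a simplified userspace stand-in that assumes 4 KiB pages and a non-zero size:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order n such that (PAGE_SIZE << n) >= size, for size > 0. */
static unsigned int get_order(unsigned long size)
{
        unsigned int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned long sizes[] = { 100, PAGE_SIZE, PAGE_SIZE + 1, 64 * 1024 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("get_order(%lu) = %u (block of %lu bytes)\n",
                       sizes[i], get_order(sizes[i]),
                       PAGE_SIZE << get_order(sizes[i]));
        return 0;
}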
D | mempool.c |
    65  int order = (int)(long)pool->pool_data;    in check_element() local
    68  __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));    in check_element()
    89  int order = (int)(long)pool->pool_data;    in poison_element() local
    92  __poison_element(addr, 1UL << (PAGE_SHIFT + order));    in poison_element()
    483  int order = (int)(long)pool_data;    in mempool_alloc_pages() local
    484  return alloc_pages(gfp_mask, order);    in mempool_alloc_pages()
    490  int order = (int)(long)pool_data;    in mempool_free_pages() local
    491  __free_pages(element, order);    in mempool_free_pages()
|
D | page_owner.c |
    49  void __reset_page_owner(struct page *page, unsigned int order)    in __reset_page_owner() argument
    54  for (i = 0; i < (1 << order); i++) {    in __reset_page_owner()
    62  void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)    in __set_page_owner() argument
    78  page_ext->order = order;    in __set_page_owner()
    116  page_ext->order, page_ext->gfp_mask);    in print_page_owner()
|
D | nobootmem.c |
    96  int order;    in __free_pages_memory() local
    99  order = min(MAX_ORDER - 1UL, __ffs(start));    in __free_pages_memory()
    101  while (start + (1UL << order) > end)    in __free_pages_memory()
    102  order--;    in __free_pages_memory()
    104  __free_pages_bootmem(pfn_to_page(start), start, order);    in __free_pages_memory()
    106  start += (1UL << order);    in __free_pages_memory()
|
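__free_pages_memory() releases a pfn range to the buddy allocator in the largest blocks it can: the order is capped by MAX_ORDER - 1 and by the alignment of the start pfn (__ffs(start)), then shrunk until the block fits before end. A userspace sketch of that loop; __builtin_ctzl() stands in for __ffs() and the pfn range is made up:

#include <stdio.h>

#define MAX_ORDER 11UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long start = 5, end = 1000;    /* arbitrary, made-up pfn range */

        while (start < end) {
                /* __builtin_ctzl(): index of the lowest set bit, i.e. the
                 * natural alignment of 'start' ('start' is never zero here). */
                unsigned long order = min_ul(MAX_ORDER - 1,
                                             (unsigned long)__builtin_ctzl(start));

                while (start + (1UL << order) > end)
                        order--;

                printf("free %4lu pages (order %2lu) starting at pfn %lu\n",
                       1UL << order, order, start);
                start += 1UL << order;
        }
        return 0;
}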
D | slub.c |
    306  static inline int order_objects(int order, unsigned long size, int reserved)    in order_objects() argument
    308  return ((PAGE_SIZE << order) - reserved) / size;    in order_objects()
    311  static inline struct kmem_cache_order_objects oo_make(int order,    in oo_make() argument
    315  (order << OO_SHIFT) + order_objects(order, size, reserved)    in oo_make()
    1429  int order = oo_order(oo);    in alloc_slab_page() local
    1434  page = alloc_pages(flags, order);    in alloc_slab_page()
    1436  page = __alloc_pages_node(node, flags, order);    in alloc_slab_page()
    1438  if (page && memcg_charge_slab(page, flags, order, s)) {    in alloc_slab_page()
    1439  __free_pages(page, order);    in alloc_slab_page()
    1452  int idx, order;    in allocate_slab() local
    [all …]
|
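order_objects() is straightforward arithmetic: a slab of 2^order pages, minus any reserved bytes, holds ((PAGE_SIZE << order) - reserved) / size objects, and oo_make() packs the order and that count into a single word split at OO_SHIFT. A worked example; the 4 KiB page size and the OO_SHIFT value of 16 are assumptions for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed */
#define OO_SHIFT  16            /* assumed split point for the packed word */
#define OO_MASK   ((1UL << OO_SHIFT) - 1)

static unsigned long order_objects(int order, unsigned long size, int reserved)
{
        return ((PAGE_SIZE << order) - reserved) / size;
}

/* Pack (order, objects-per-slab) into a single word, like oo_make(). */
static unsigned long oo_make(int order, unsigned long size, int reserved)
{
        return ((unsigned long)order << OO_SHIFT) +
               order_objects(order, size, reserved);
}

int main(void)
{
        unsigned long size = 192, oo;

        oo = oo_make(1, size, 0);       /* order-1 slab, no reserved bytes */
        printf("order %lu, %lu objects of %lu bytes per slab\n",
               oo >> OO_SHIFT, oo & OO_MASK, size);     /* order 1, 42 objects */
        return 0;
}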
D | hugetlb.c |
    1024  unsigned int order)    in destroy_compound_gigantic_page() argument
    1027  int nr_pages = 1 << order;    in destroy_compound_gigantic_page()
    1039  static void free_gigantic_page(struct page *page, unsigned int order)    in free_gigantic_page() argument
    1041  free_contig_range(page_to_pfn(page), 1 << order);    in free_gigantic_page()
    1083  static struct page *alloc_gigantic_page(int nid, unsigned int order)    in alloc_gigantic_page() argument
    1085  unsigned long nr_pages = 1 << order;    in alloc_gigantic_page()
    1119  static void prep_compound_gigantic_page(struct page *page, unsigned int order);
    1152  static inline void free_gigantic_page(struct page *page, unsigned int order) { }    in free_gigantic_page() argument
    1154  unsigned int order) { }    in destroy_compound_gigantic_page() argument
    1293  static void prep_compound_gigantic_page(struct page *page, unsigned int order)    in prep_compound_gigantic_page() argument
    [all …]
|
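Gigantic hugetlb pages are simply very high orders: the order is the huge-page shift minus PAGE_SHIFT, and alloc_gigantic_page() has to find 1 << order physically contiguous pages through the contiguous-range allocator rather than the buddy free lists. A quick arithmetic check, assuming 4 KiB base pages and a 1 GiB (PUD-sized) huge page:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* 4 KiB base pages assumed */

int main(void)
{
        unsigned int huge_shift = 30;   /* 1 GiB huge page assumed */
        unsigned int order = huge_shift - PAGE_SHIFT;

        printf("1 GiB gigantic page: order %u, %lu base pages\n",
               order, 1UL << order);    /* order 18, 262144 pages */
        return 0;
}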
D | vmalloc.c |
    822  static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)    in new_vmap_block() argument
    857  BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));    in new_vmap_block()
    858  vb->free = VMAP_BBMAP_BITS - (1UL << order);    in new_vmap_block()
    943  unsigned int order;    in vb_alloc() local
    955  order = get_order(size);    in vb_alloc()
    963  if (vb->free < (1UL << order)) {    in vb_alloc()
    970  vb->free -= 1UL << order;    in vb_alloc()
    986  vaddr = new_vmap_block(order, gfp_mask);    in vb_alloc()
    995  unsigned int order;    in vb_free() local
    1003  order = get_order(size);    in vb_free()
    [all …]
|
D | slab.h |
    240  gfp_t gfp, int order,    in memcg_charge_slab() argument
    247  return __memcg_kmem_charge_memcg(page, gfp, order,    in memcg_charge_slab()
    285  static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,    in memcg_charge_slab() argument
|
D | slab_common.c |
    1008  void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)    in kmalloc_order() argument
    1014  page = alloc_kmem_pages(flags, order);    in kmalloc_order()
    1023  void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)    in kmalloc_order_trace() argument
    1025  void *ret = kmalloc_order(size, flags, order);    in kmalloc_order_trace()
    1026  trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);    in kmalloc_order_trace()
|
D | oom_kill.c |
    127  return oc->order == -1;    in is_sysrq_oom()
    390  current->comm, oc->gfp_mask, oc->order,    in dump_header()
    757  .order = 0,    in pagefault_out_of_memory()
|
D | mempolicy.c |
    1939  static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,    in alloc_page_interleave() argument
    1946  page = __alloc_pages(gfp, order, zl);    in alloc_page_interleave()
    1976  alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,    in alloc_pages_vma() argument
    1992  nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);    in alloc_pages_vma()
    1994  page = alloc_page_interleave(gfp, order, nid);    in alloc_pages_vma()
    2047  page = __alloc_pages_node(hpage_node, gfp, order);    in alloc_pages_vma()
    2054  page = __alloc_pages_nodemask(gfp, order, zl, nmask);    in alloc_pages_vma()
    2081  struct page *alloc_pages_current(gfp_t gfp, unsigned order)    in alloc_pages_current() argument
    2098  page = alloc_page_interleave(gfp, order, interleave_nodes(pol));    in alloc_pages_current()
    2100  page = __alloc_pages_nodemask(gfp, order,    in alloc_pages_current()
|
D | memcontrol.c |
    1342  int order)    in mem_cgroup_out_of_memory() argument
    1348  .order = order,    in mem_cgroup_out_of_memory()
    1694  static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)    in mem_cgroup_oom() argument
    1715  current->memcg_oom_order = order;    in mem_cgroup_oom()
    2419  int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,    in __memcg_kmem_charge_memcg() argument
    2422  unsigned int nr_pages = 1 << order;    in __memcg_kmem_charge_memcg()
    2443  int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)    in __memcg_kmem_charge() argument
    2449  ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);    in __memcg_kmem_charge()
    2454  void __memcg_kmem_uncharge(struct page *page, int order)    in __memcg_kmem_uncharge() argument
    2457  unsigned int nr_pages = 1 << order;    in __memcg_kmem_uncharge()
    [all …]
|
D | memory_hotplug.c |
    1347  int order;    in next_active_pageblock() local
    1349  order = page_order(page);    in next_active_pageblock()
    1350  if ((order < MAX_ORDER) && (order >= pageblock_order))    in next_active_pageblock()
    1351  return page + (1 << order);    in next_active_pageblock()
|
D | bootmem.c |
    211  int order = ilog2(BITS_PER_LONG);    in free_all_bootmem_core() local
    213  __free_pages_bootmem(pfn_to_page(start), start, order);    in free_all_bootmem_core()
|
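The fixed order in free_all_bootmem_core() falls out of the bootmem bitmap being scanned one word at a time: a fully clear word covers BITS_PER_LONG pages, which can be returned as a single block of order ilog2(BITS_PER_LONG). A quick check of that value; the result depends on the host's sizeof(long) and uses a GCC/Clang builtin:

#include <stdio.h>

int main(void)
{
        unsigned int bits_per_long = 8 * sizeof(long);          /* 64 on LP64 hosts */
        unsigned int order = 31 - __builtin_clz(bits_per_long); /* ilog2()          */

        printf("BITS_PER_LONG = %u -> order %u (%u pages per block)\n",
               bits_per_long, order, 1U << order);
        return 0;
}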
D | nommu.c |
    1142  int ret, order;    in do_mmap_private() local
    1169  order = get_order(len);    in do_mmap_private()
    1170  total = 1 << order;    in do_mmap_private()
|
D | Kconfig |
    576  in order to reduce fragmentation. However, this results in a
    578  returned by an alloc(). This handle must be mapped in order to
|
/mm/kasan/ |
D | kasan.c |
    373  void kasan_alloc_pages(struct page *page, unsigned int order)    in kasan_alloc_pages() argument
    376  kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);    in kasan_alloc_pages()
    379  void kasan_free_pages(struct page *page, unsigned int order)    in kasan_free_pages() argument
    383  PAGE_SIZE << order,    in kasan_free_pages()
|