Lines Matching +full:functionally +full:- +full:reduced
1 // SPDX-License-Identifier: GPL-2.0-only
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
85 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
94 * reporting it and marking it "reported" - it only skips notifying
103 * page shuffling (relevant code - e.g., memory onlining - is expected to
106 * Note: No code should rely on this flag for correctness - it's purely
113 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
135 /* work_structs for global per-cpu drains */
195 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_alloc\n"); in early_init_on_alloc()
213 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_free\n"); in early_init_on_free()
228 * other index - this ensures that it will be put on the correct CMA freelist.
232 return page->index; in get_pcppage_migratetype()
237 page->index = migratetype; in set_pcppage_migratetype()
287 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
288 * 1G machine -> (16M dma, 784M normal, 224M high)
293 * TBD: should special case ZONE_DMA32 machines here - in those we normally
355 int user_min_free_kbytes = -1;
359 * are not on separate NUMA nodes. Functionally this works but with
362 * many cases very high-order allocations like THP are likely to be
363 * unsupported and the premature reclaim offsets the advantage of long-term
400 * During boot we initialize deferred pages on-demand, as needed, but once
416 * on-demand allocation and then freed again before the deferred pages
430 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) in early_page_uninitialised()
454 /* Always populate low zones for address-constrained allocations */ in defer_init()
458 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) in defer_init()
466 (pfn & (PAGES_PER_SECTION - 1)) == 0) { in defer_init()
467 NODE_DATA(nid)->first_deferred_pfn = pfn; in defer_init()
493 return page_zone(page)->pageblock_flags; in get_pageblock_bitmap()
500 pfn &= (PAGES_PER_SECTION-1); in pfn_to_bitidx()
502 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages); in pfn_to_bitidx()
508 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
527 bitidx &= (BITS_PER_LONG-1); in __get_pfnblock_flags_mask()
545 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
565 bitidx &= (BITS_PER_LONG-1); in set_pfnblock_flags_mask()
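Taken together, the fragments above compute a word index and a bit offset into the packed per-zone pageblock bitmap. A minimal userspace sketch of that index math, assuming the typical pageblock_order of 9, with NB_BITS standing in for the kernel's NR_PAGEBLOCK_BITS (4) and all names illustrative:

#include <limits.h>

#define PAGEBLOCK_ORDER	9	/* assumption: typical x86 value */
#define NB_BITS		4	/* stands in for NR_PAGEBLOCK_BITS */
#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

/* Read the flag bits for the pageblock containing a zone-relative pfn. */
static unsigned long pfnblock_flags(const unsigned long *bitmap,
				    unsigned long pfn, unsigned long mask)
{
	unsigned long bitidx = (pfn >> PAGEBLOCK_ORDER) * NB_BITS;
	unsigned long word = bitmap[bitidx / BITS_PER_LONG];

	bitidx &= BITS_PER_LONG - 1;	/* offset within the word */
	return (word >> bitidx) & mask;
}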
601 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
602 sp = zone->spanned_pages; in page_outside_zone_boundaries()
608 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
609 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
670 current->comm, page_to_pfn(page)); in bad_page()
683 * Higher-order pages are called "compound pages". They are structured thusly:
688 * in bit 0 of page->compound_head. The rest of the bits is a pointer to the head page.
690 * The first tail page's ->compound_dtor holds the offset in array of compound
693 * The first tail page's ->compound_order holds the order of allocation.
694 * This usage means that zero-order pages may not be compound.
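A minimal sketch of decoding the encoding just described; it mirrors the kernel's compound_head() accessor (READ_ONCE() is the kernel's own macro; the function name here is illustrative):

static inline struct page *sketch_compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (head & 1)				/* tail page */
		return (struct page *)(head - 1);	/* strip bit 0 */
	return page;				/* head or non-compound page */
}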
712 p->mapping = TAIL_MAPPING; in prep_compound_page()
718 atomic_set(compound_mapcount_ptr(page), -1); in prep_compound_page()
777 INIT_LIST_HEAD(&page->lru); in set_page_guard()
780 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
819 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
847 struct capture_control *capc = current->capture_control; in task_capc()
850 !(current->flags & PF_KTHREAD) && in task_capc()
851 !capc->page && in task_capc()
852 capc->cc->zone == zone ? capc : NULL; in task_capc()
859 if (!capc || order != capc->cc->order) in compaction_capture()
870 * and vice-versa but no more than normal fallback logic which can in compaction_capture()
871 * have trouble finding a high-order free page. in compaction_capture()
876 capc->page = page; in compaction_capture()
898 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
900 list_add(&page->lru, &area->free_list[migratetype]); in add_to_free_list()
901 area->nr_free++; in add_to_free_list()
908 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
910 list_add_tail(&page->lru, &area->free_list[migratetype]); in add_to_free_list_tail()
911 area->nr_free++; in add_to_free_list_tail()
916 * of the list - so the moved pages won't immediately be considered for
922 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
924 list_move_tail(&page->lru, &area->free_list[migratetype]); in move_to_free_list()
934 list_del(&page->lru); in del_page_from_free_list()
937 zone->free_area[order].nr_free--; in del_page_from_free_list()
942 * of the next-highest order is free. If it is, it's possible
955 if (order >= MAX_ORDER - 2) in buddy_merge_likely()
962 higher_page = page + (combined_pfn - pfn); in buddy_merge_likely()
964 higher_buddy = higher_page + (buddy_pfn - combined_pfn); in buddy_merge_likely()
973 * The concept of a buddy system is to maintain direct-mapped table
991 * -- nyc
1006 max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order); in __free_one_page()
1009 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); in __free_one_page()
1011 VM_BUG_ON(migratetype == -1); in __free_one_page()
1015 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
1021 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1026 buddy = page + (buddy_pfn - pfn); in __free_one_page()
1041 page = page + (combined_pfn - pfn); in __free_one_page()
1045 if (order < MAX_ORDER - 1) { in __free_one_page()
1052 * low-order merging. in __free_one_page()
1058 buddy = page + (buddy_pfn - pfn); in __free_one_page()
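The buddy_pfn and combined_pfn arithmetic used throughout __free_one_page() reduces to two bit operations. A self-contained userspace sketch, with find_buddy_pfn() mirroring the kernel's __find_buddy_pfn():

#include <assert.h>

static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);	/* flip the bit that splits the pair */
}

int main(void)
{
	unsigned long pfn = 8;		/* start of an order-3 block */
	unsigned long buddy_pfn = find_buddy_pfn(pfn, 3);	/* 0 */
	unsigned long combined_pfn = buddy_pfn & pfn;		/* 0 */

	/* merging pages 0-7 with 8-15 yields the order-4 block at pfn 0 */
	assert(buddy_pfn == 0 && combined_pfn == 0);
	return 0;
}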
1098 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_expected_state()
1101 if (unlikely((unsigned long)page->mapping | in page_expected_state()
1104 (unsigned long)page->mem_cgroup | in page_expected_state()
1106 (page->flags & check_flags))) in page_expected_state()
1116 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_bad_reason()
1118 if (unlikely(page->mapping != NULL)) in page_bad_reason()
1119 bad_reason = "non-NULL mapping"; in page_bad_reason()
1122 if (unlikely(page->flags & flags)) { in page_bad_reason()
1129 if (unlikely(page->mem_cgroup)) in page_bad_reason()
1156 * We rely on page->lru.next never having bit 0 set, unless the page in free_tail_pages_check()
1157 * is PageTail(). Let's make sure that's true even for poisoned ->lru. in free_tail_pages_check()
1165 switch (page - head_page) { in free_tail_pages_check()
1167 /* the first tail page: ->mapping may be compound_mapcount() */ in free_tail_pages_check()
1175 * the second tail page: ->mapping is in free_tail_pages_check()
1176 * deferred_list.next -- ignore value. in free_tail_pages_check()
1180 if (page->mapping != TAIL_MAPPING) { in free_tail_pages_check()
1196 page->mapping = NULL; in free_tail_pages_check()
1234 * avoid checking PageCompound for order-0 pages. in free_pages_prepare()
1251 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_prepare()
1255 page->mapping = NULL; in free_pages_prepare()
1264 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_prepare()
1294 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1312 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1335 struct page *buddy = page + (buddy_pfn - pfn); in prefetch_buddy()
1365 count = min(pcp->count, count); in free_pcppages_bulk()
1370 * Remove pages from lists in a round-robin fashion. A in free_pcppages_bulk()
1380 list = &pcp->lists[migratetype]; in free_pcppages_bulk()
1383 /* This is the only non-empty list. Free them all. */ in free_pcppages_bulk()
1390 list_del(&page->lru); in free_pcppages_bulk()
1391 pcp->count--; in free_pcppages_bulk()
1396 list_add_tail(&page->lru, &head); in free_pcppages_bulk()
1401 * under zone->lock. It is believed the overhead of in free_pcppages_bulk()
1403 * can be offset by reduced memory latency later. To in free_pcppages_bulk()
1405 * prefetch buddy for the first pcp->batch nr of pages. in free_pcppages_bulk()
1407 if (prefetch_nr++ < pcp->batch) in free_pcppages_bulk()
1409 } while (--count && --batch_free && !list_empty(list)); in free_pcppages_bulk()
1412 spin_lock(&zone->lock); in free_pcppages_bulk()
1417 * page->lru.next will not point to original list. in free_pcppages_bulk()
1430 spin_unlock(&zone->lock); in free_pcppages_bulk()
1438 spin_lock(&zone->lock); in free_one_page()
1444 spin_unlock(&zone->lock); in free_one_page()
1457 INIT_LIST_HEAD(&page->lru); in __init_single_page()
1478 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page()
1480 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1508 /* Avoid false-positive PageTail() */ in reserve_bootmem_region()
1509 INIT_LIST_HEAD(&page->lru); in reserve_bootmem_region()
1551 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { in __free_pages_core()
1559 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); in __free_pages_core()
1583 if (state->last_start <= pfn && pfn < state->last_end) in __early_pfn_to_nid()
1584 return state->last_nid; in __early_pfn_to_nid()
1588 state->last_start = start_pfn; in __early_pfn_to_nid()
1589 state->last_end = end_pfn; in __early_pfn_to_nid()
1590 state->last_nid = nid; in __early_pfn_to_nid()
1644 end_pfn--; in __pageblock_pfn_to_page()
1667 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1684 zone->contiguous = true; in set_zone_contiguous()
1689 zone->contiguous = false; in clear_zone_contiguous()
1704 /* Free a large naturally-aligned chunk if possible */ in deferred_free_range()
1706 (pfn & (pageblock_nr_pages - 1)) == 0) { in deferred_free_range()
1713 if ((pfn & (pageblock_nr_pages - 1)) == 0) in deferred_free_range()
1743 if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn)) in deferred_pfn_valid()
1755 unsigned long nr_pgmask = pageblock_nr_pages - 1; in deferred_free_pages()
1760 deferred_free_range(pfn - nr_free, nr_free); in deferred_free_pages()
1763 deferred_free_range(pfn - nr_free, nr_free); in deferred_free_pages()
1770 deferred_free_range(pfn - nr_free, nr_free); in deferred_free_pages()
1782 unsigned long nr_pgmask = pageblock_nr_pages - 1; in deferred_init_pages()
1804 * This function is meant to pre-load the iterator for the zone init.
1918 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in deferred_init_memmap()
1931 first_init_pfn = pgdat->first_deferred_pfn; in deferred_init_memmap()
1939 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); in deferred_init_memmap()
1940 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); in deferred_init_memmap()
1941 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_init_memmap()
1946 * pre-grown prior to start of deferred page initialization. in deferred_init_memmap()
1952 zone = pgdat->node_zones + zid; in deferred_init_memmap()
1970 .size = epfn_align - spfn, in deferred_init_memmap()
1985 pgdat->node_id, jiffies_to_msecs(jiffies - start)); in deferred_init_memmap()
2010 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2011 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; in deferred_grow_zone()
2026 if (first_deferred_pfn != pgdat->first_deferred_pfn) { in deferred_grow_zone()
2034 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_grow_zone()
2061 pgdat->first_deferred_pfn = spfn; in deferred_grow_zone()
2107 * on-demand struct page initialization. in page_alloc_init_late()
2135 } while (++p, --i); in init_cma_reserved_pageblock()
2144 __free_pages(p, MAX_ORDER - 1); in init_cma_reserved_pageblock()
2146 } while (i -= MAX_ORDER_NR_PAGES); in init_cma_reserved_pageblock()
2168 * -- nyc
2176 high--; in expand()
2196 if (unlikely(page->flags & __PG_HWPOISON)) { in check_new_page_bad()
2227 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2245 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2326 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2423 start_pfn = pfn & ~(pageblock_nr_pages - 1); in move_freepages_block()
2424 end_pfn = start_pfn + pageblock_nr_pages - 1; in move_freepages_block()
2439 int nr_pageblocks = 1 << (start_order - pageblock_order); in change_pageblock_range()
2441 while (nr_pageblocks--) { in change_pageblock_range()
2495 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2511 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
2520 * pageblock to our migratetype and determine how many already-allocated pages
2553 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2571 * to MOVABLE pageblock, consider all non-movable pages as in steal_suitable_fallback()
2574 * exact migratetype of non-movable pages. in steal_suitable_fallback()
2578 - (free_pages + movable_pages); in steal_suitable_fallback()
2591 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || in steal_suitable_fallback()
2613 if (area->nr_free == 0) in find_suitable_fallback()
2614 return -1; in find_suitable_fallback()
2635 return -1; in find_suitable_fallback()
2639 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2650 * Check is race-prone but harmless. in reserve_highatomic_pageblock()
2653 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2656 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2659 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2666 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2672 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
2677 * potentially hurts the reliability of high-order allocations when under
2687 struct zonelist *zonelist = ac->zonelist; in unreserve_highatomic_pageblock()
2695 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
2696 ac->nodemask) { in unreserve_highatomic_pageblock()
2701 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2705 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2707 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2717 * from highatomic to ac->migratetype. So we should in unreserve_highatomic_pageblock()
2723 * locking could inadvertently allow a per-cpu in unreserve_highatomic_pageblock()
2728 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2730 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2734 * Convert to ac->migratetype and avoid the normal in unreserve_highatomic_pageblock()
2742 set_pageblock_migratetype(page, ac->migratetype); in unreserve_highatomic_pageblock()
2743 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2746 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2750 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2790 for (current_order = MAX_ORDER - 1; current_order >= min_order; in __rmqueue_fallback()
2791 --current_order) { in __rmqueue_fallback()
2792 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2795 if (fallback_mt == -1) in __rmqueue_fallback()
2818 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2821 if (fallback_mt != -1) in __rmqueue_fallback()
2826 * This should not happen - we already found a suitable fallback in __rmqueue_fallback()
2867 * Call me with the zone->lock already held.
2921 spin_lock(&zone->lock); in rmqueue_bulk()
2941 list_add_tail(&page->lru, list); in rmqueue_bulk()
2945 -(1 << order)); in rmqueue_bulk()
2954 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2955 spin_unlock(&zone->lock); in rmqueue_bulk()
2974 batch = READ_ONCE(pcp->batch); in drain_zone_pages()
2975 to_drain = min(pcp->count, batch); in drain_zone_pages()
2996 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
2998 pcp = &pset->pcp; in drain_pages_zone()
2999 if (pcp->count) in drain_pages_zone()
3000 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
3021 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3023 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3050 drain_local_pages(drain->zone); in drain_local_pages_wq()
3055 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3057 * When zone parameter is non-NULL, spill just the single zone's pages.
3101 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
3102 if (pcp->pcp.count) in drain_all_pages()
3106 pcp = per_cpu_ptr(z->pageset, cpu); in drain_all_pages()
3107 if (pcp->pcp.count) { in drain_all_pages()
3123 drain->zone = zone; in drain_all_pages()
3124 INIT_WORK(&drain->work, drain_local_pages_wq); in drain_all_pages()
3125 queue_work_on(cpu, mm_percpu_wq, &drain->work); in drain_all_pages()
3128 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work); in drain_all_pages()
3150 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
3153 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
3157 if (!--page_count) { in mark_free_pages()
3171 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3176 if (!--page_count) { in mark_free_pages()
3184 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3225 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
3226 list_add(&page->lru, &pcp->lists[migratetype]); in free_unref_page_commit()
3227 pcp->count++; in free_unref_page_commit()
3228 if (pcp->count >= pcp->high) { in free_unref_page_commit()
3229 unsigned long batch = READ_ONCE(pcp->batch); in free_unref_page_commit()
3235 * Free a 0-order page
3251 * Free a list of 0-order pages
3263 list_del(&page->lru); in free_unref_page_list()
3289 * split_page takes a non-compound higher-order page, and splits it into
3290 * n (1<<order) sub-pages: page[0..n]
3291 * Each sub-page must be freed individually.
3324 * emulate a high-order watermark check with a raised order-0 in __isolate_free_page()
3325 * watermark, because we already know our high-order page in __isolate_free_page()
3328 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3332 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3343 if (order >= pageblock_order - 1) { in __isolate_free_page()
3344 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
3359 * __putback_isolated_page - Return a now-isolated page back where we got it
3372 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
3406 /* Remove page from the per-cpu list, caller must protect the list */
3416 pcp->count += rmqueue_bulk(zone, 0, in __rmqueue_pcplist()
3417 pcp->batch, list, in __rmqueue_pcplist()
3424 list_del(&page->lru); in __rmqueue_pcplist()
3425 pcp->count--; in __rmqueue_pcplist()
3431 /* Lock and remove page from the per-cpu list */
3442 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
3443 list = &pcp->lists[migratetype]; in rmqueue_pcplist()
3454 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3481 * allocate greater than order-1 page units with __GFP_NOFAIL. in rmqueue()
3484 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3489 * order-0 request can reach here when the pcplist is skipped in rmqueue()
3490 * due to non-CMA allocation context. HIGHATOMIC area is in rmqueue()
3491 * reserved for high-order atomic allocation, so order-0 in rmqueue()
3502 spin_unlock(&zone->lock); in rmqueue()
3505 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3514 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { in rmqueue()
3515 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3573 debugfs_create_bool("ignore-gfp-wait", mode, dir, in fail_page_alloc_debugfs()
3575 debugfs_create_bool("ignore-gfp-highmem", mode, dir, in fail_page_alloc_debugfs()
3577 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); in fail_page_alloc_debugfs()
3605 long unusable_free = (1 << order) - 1; in __zone_watermark_unusable_free()
3609 * the high-atomic reserves. This will over-estimate the size of the in __zone_watermark_unusable_free()
3613 unusable_free += z->nr_reserved_highatomic; in __zone_watermark_unusable_free()
3625 * Return true if free base pages are above 'mark'. For high-order checks it
3626 * will return true if the order-0 watermark is reached and there is at least
3638 /* free_pages may go negative - that's OK */ in __zone_watermark_ok()
3639 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); in __zone_watermark_ok()
3642 min -= min / 2; in __zone_watermark_ok()
3649 * makes during the free path will be small and short-lived. in __zone_watermark_ok()
3652 min -= min / 2; in __zone_watermark_ok()
3654 min -= min / 4; in __zone_watermark_ok()
3658 * Check watermarks for an order-0 allocation request. If these in __zone_watermark_ok()
3659 * are not met, then a high-order request also cannot go ahead in __zone_watermark_ok()
3662 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) in __zone_watermark_ok()
3665 /* If this is an order-0 request then the watermark is fine */ in __zone_watermark_ok()
3669 /* For a high-order request, check at least one suitable page is free */ in __zone_watermark_ok()
3671 struct free_area *area = &z->free_area[o]; in __zone_watermark_ok()
3674 if (!area->nr_free) in __zone_watermark_ok()
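Stripped of the highatomic, CMA, and per-migratetype details, the check above reduces to two stages: an order-0 headroom test, then a scan for any non-empty free_area at or above the requested order. A simplified userspace sketch under those assumptions (struct free_area_sketch and the flat lowmem_reserve parameter are stand-ins for the real zone fields):

#include <stdbool.h>

#define MAX_ORDER 11

struct free_area_sketch {
	unsigned long nr_free;
};

static bool watermark_ok(long free_pages, long min, long lowmem_reserve,
			 unsigned int order,
			 const struct free_area_sketch area[MAX_ORDER])
{
	if (free_pages <= min + lowmem_reserve)
		return false;			/* order-0 watermark not met */
	if (!order)
		return true;			/* order-0 request is fine */
	for (unsigned int o = order; o < MAX_ORDER; o++)
		if (area[o].nr_free)		/* a suitable page exists */
			return true;
	return false;
}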
3710 * Fast check for order-0 only. If this fails then the reserves in zone_watermark_fast()
3720 /* reserved may over estimate high-atomic reserves. */ in zone_watermark_fast()
3721 usable_free -= min(usable_free, reserved); in zone_watermark_fast()
3722 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) in zone_watermark_fast()
3730 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations in zone_watermark_fast()
3735 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost in zone_watermark_fast()
3737 mark = z->_watermark[WMARK_MIN]; in zone_watermark_fast()
3750 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) in zone_watermark_ok_safe()
3798 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in alloc_flags_nofragment()
3801 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); in alloc_flags_nofragment()
3802 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
3814 unsigned int pflags = current->flags; in current_alloc_flags()
3843 z = ac->preferred_zoneref; in get_page_from_freelist()
3844 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
3845 ac->nodemask) { in get_page_from_freelist()
3864 * exceed the per-node dirty limit in the slowpath in get_page_from_freelist()
3870 * dirty-throttling and the flusher threads. in get_page_from_freelist()
3872 if (ac->spread_dirty_pages) { in get_page_from_freelist()
3873 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
3876 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
3877 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
3883 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
3891 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
3900 ac->highest_zoneidx, alloc_flags, in get_page_from_freelist()
3920 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3923 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3934 ac->highest_zoneidx, alloc_flags)) in get_page_from_freelist()
3942 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3943 gfp_mask, alloc_flags, ac->migratetype); in get_page_from_freelist()
3948 * If this is a high-order atomic allocation then check in get_page_from_freelist()
3989 (current->flags & (PF_MEMALLOC | PF_EXITING))) in warn_alloc_show_mem()
4012 current->comm, &vaf, gfp_mask, &gfp_mask, in warn_alloc()
4047 .zonelist = ac->zonelist, in __alloc_pages_may_oom()
4048 .nodemask = ac->nodemask, in __alloc_pages_may_oom()
4081 if (current->flags & PF_DUMPCORE) in __alloc_pages_may_oom()
4097 if (ac->highest_zoneidx < ZONE_NORMAL) in __alloc_pages_may_oom()
4116 * Help non-failing allocations by giving them access to memory in __alloc_pages_may_oom()
4135 /* Try memory compaction for high-order allocations before reclaim */
4174 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
4218 * compaction was skipped because there are not enough order-0 pages in should_compact_retry()
4260 (*compact_priority)--; in should_compact_retry()
4293 * Let's give them a good hope and keep retrying while the order-0 in should_compact_retry()
4296 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4297 ac->highest_zoneidx, ac->nodemask) { in should_compact_retry()
4299 ac->highest_zoneidx, alloc_flags)) in should_compact_retry()
4319 if (current->flags & PF_MEMALLOC) in __need_fs_reclaim()
4397 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
4398 ac->nodemask); in __perform_reclaim()
4427 * pages are pinned on the per-cpu lists or in high alloc reserves. in __alloc_pages_direct_reclaim()
4452 enum zone_type highest_zoneidx = ac->highest_zoneidx; in wake_all_kswapds()
4454 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4455 ac->nodemask) { in wake_all_kswapds()
4456 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
4458 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4529 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) in __gfp_pfmemalloc_flags()
4532 if (current->flags & PF_MEMALLOC) in __gfp_pfmemalloc_flags()
4590 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4591 ac->highest_zoneidx, ac->nodemask) { in should_reclaim_retry()
4605 ac->highest_zoneidx, alloc_flags, available); in should_reclaim_retry()
4640 if (current->flags & PF_WQ_WORKER) in should_reclaim_retry()
4656 * This assumes that for all allocations, ac->nodemask can come only in check_retry_cpuset()
4661 if (cpusets_enabled() && ac->nodemask && in check_retry_cpuset()
4662 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { in check_retry_cpuset()
4663 ac->nodemask = NULL; in check_retry_cpuset()
4725 * there was a cpuset modification and we are retrying - otherwise we in __alloc_pages_slowpath()
4726 * could end up iterating over non-eligible zones endlessly. in __alloc_pages_slowpath()
4728 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, in __alloc_pages_slowpath()
4729 ac->highest_zoneidx, ac->nodemask); in __alloc_pages_slowpath()
4730 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
4746 * that we have enough base pages and don't need to reclaim. For non- in __alloc_pages_slowpath()
4747 * movable high-order allocations, do that as well, as compaction will in __alloc_pages_slowpath()
4755 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
4777 * - potentially very expensive because zones are far in __alloc_pages_slowpath()
4780 * - not guaranteed to help because isolate_freepages() in __alloc_pages_slowpath()
4783 * - unlikely to make entire pageblocks free on its in __alloc_pages_slowpath()
4814 ac->nodemask = NULL; in __alloc_pages_slowpath()
4815 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, in __alloc_pages_slowpath()
4816 ac->highest_zoneidx, ac->nodemask); in __alloc_pages_slowpath()
4829 if (current->flags & PF_MEMALLOC) in __alloc_pages_slowpath()
4866 * It doesn't make any sense to retry for the compaction if the order-0 in __alloc_pages_slowpath()
4929 WARN_ON_ONCE(current->flags & PF_MEMALLOC); in __alloc_pages_slowpath()
4940 * Help non-failing allocations by giving them access to memory in __alloc_pages_slowpath()
4953 warn_alloc(gfp_mask, ac->nodemask, in __alloc_pages_slowpath()
4964 ac->highest_zoneidx = gfp_zone(gfp_mask); in prepare_alloc_pages()
4965 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); in prepare_alloc_pages()
4966 ac->nodemask = nodemask; in prepare_alloc_pages()
4967 ac->migratetype = gfp_migratetype(gfp_mask); in prepare_alloc_pages()
4975 if (!in_interrupt() && !ac->nodemask) in prepare_alloc_pages()
4976 ac->nodemask = &cpuset_current_mems_allowed; in prepare_alloc_pages()
4997 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); in prepare_alloc_pages()
5004 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, in prepare_alloc_pages()
5005 ac->highest_zoneidx, ac->nodemask); in prepare_alloc_pages()
5040 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); in __alloc_pages_nodemask()
5058 * &cpuset_current_mems_allowed to optimize the fast-path attempt. in __alloc_pages_nodemask()
5115 while (order-- > 0) in __free_pages()
5132 * An arbitrary-length arbitrary-offset area of memory which resides
5139 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5152 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; in __page_frag_cache_refill()
5157 nc->va = page ? page_address(page) : NULL; in __page_frag_cache_refill()
5161 int order = get_order(nc->size); in __page_frag_cache_refill()
5188 mod_zone_page_state(page_zone(page), NR_SKB_PAGES, -(long)delta); in __page_frag_cache_drain()
5203 if (unlikely(!nc->va)) { in page_frag_alloc()
5211 size = nc->size; in page_frag_alloc()
5219 nc->pfmemalloc = page_is_pfmemalloc(page); in page_frag_alloc()
5220 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; in page_frag_alloc()
5221 nc->offset = size; in page_frag_alloc()
5224 offset = nc->offset - fragsz; in page_frag_alloc()
5226 page = virt_to_page(nc->va); in page_frag_alloc()
5228 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) in page_frag_alloc()
5231 if (unlikely(nc->pfmemalloc)) { in page_frag_alloc()
5238 size = nc->size; in page_frag_alloc()
5244 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; in page_frag_alloc()
5245 offset = size - fragsz; in page_frag_alloc()
5260 nc->pagecnt_bias--; in page_frag_alloc()
5261 nc->offset = offset; in page_frag_alloc()
5263 return nc->va + offset; in page_frag_alloc()
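The fast path above is a downward bump allocator: each call subtracts fragsz from the cached offset and returns that address, refilling only when the offset would underflow. A much-simplified userspace sketch of just that step (no refcount bias or pfmemalloc handling; names are illustrative):

#include <stddef.h>

struct frag_cache {
	char	*va;		/* base of the cached buffer */
	size_t	offset;		/* high-water mark, moves downward */
};

static void *frag_alloc(struct frag_cache *nc, size_t fragsz)
{
	if (fragsz > nc->offset)
		return NULL;	/* the kernel would refill the cache here */
	nc->offset -= fragsz;	/* carve from the tail toward the start */
	return nc->va + nc->offset;
}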
5279 mod_zone_page_state(page_zone(page), NR_SKB_PAGES, -(long)delta); in page_frag_free()
5304 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5310 * allocate memory in power-of-two pages.
5332 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5358 * free_pages_exact - release memory allocated via alloc_pages_exact()
5377 * nr_free_zone_pages - count number of pages beyond high watermark
5384 * nr_free_zone_pages = managed_pages - high_pages
5402 sum += size - high; in nr_free_zone_pages()
5409 * nr_free_buffer_pages - count number of pages beyond high watermark
5449 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; in si_mem_available()
5457 pagecache -= min(pagecache / 2, wmark_low); in si_mem_available()
5467 available += reclaimable - min(reclaimable / 2, wmark_low); in si_mem_available()
5477 val->totalram = totalram_pages(); in si_meminfo()
5478 val->sharedram = global_node_page_state(NR_SHMEM); in si_meminfo()
5479 val->freeram = global_zone_page_state(NR_FREE_PAGES); in si_meminfo()
5480 val->bufferram = nr_blockdev_pages(); in si_meminfo()
5481 val->totalhigh = totalhigh_pages(); in si_meminfo()
5482 val->freehigh = nr_free_highpages(); in si_meminfo()
5483 val->mem_unit = PAGE_SIZE; in si_meminfo()
5498 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); in si_meminfo_node()
5499 val->totalram = managed_pages; in si_meminfo_node()
5500 val->sharedram = node_page_state(pgdat, NR_SHMEM); in si_meminfo_node()
5501 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); in si_meminfo_node()
5504 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node()
5511 val->totalhigh = managed_highpages; in si_meminfo_node()
5512 val->freehigh = free_highpages; in si_meminfo_node()
5514 val->totalhigh = managed_highpages; in si_meminfo_node()
5515 val->freehigh = free_highpages; in si_meminfo_node()
5517 val->mem_unit = PAGE_SIZE; in si_meminfo_node()
5531 * no node mask - aka implicit memory numa policy. Do not bother with in show_mem_node_skip()
5532 * the synchronization - read_mems_allowed_begin - because we do not in show_mem_node_skip()
5541 #define K(x) ((x) << (PAGE_SHIFT-10))
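The K() helper converts a page count to kibibytes for the memory reports below; with the common 4 KiB page size the shift is by two:

/* Assuming PAGE_SHIFT == 12 (4 KiB pages): K(x) == x << (12 - 10) == x * 4,
 * so K(256) == 1024, i.e. 256 pages are printed as 1024 kB. */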
5571 * Show free area list (used inside shift_scroll-lock stuff)
5591 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5620 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) in show_free_areas()
5647 pgdat->node_id, in show_free_areas()
5670 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? in show_free_areas()
5682 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5707 zone->name, in show_free_areas()
5712 K(zone->nr_reserved_highatomic), in show_free_areas()
5719 K(zone->present_pages), in show_free_areas()
5725 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
5729 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5741 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5743 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5745 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5748 nr[order] = area->nr_free; in show_free_areas()
5757 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5776 zoneref->zone = zone; in zoneref_set_zone()
5777 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5792 zone_type--; in build_zonerefs_node()
5793 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5815 return -EINVAL; in __parse_numa_zonelist_order()
5838 * find_next_best_node - find the next node that should appear in a given node's fallback list
5899 * This results in maximum locality--normal zone overflows into local
5900 * DMA zone, if any--but risks exhausting DMA zone.
5908 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; in build_zonelists_in_node_order()
5918 zonerefs->zone = NULL; in build_zonelists_in_node_order()
5919 zonerefs->zone_idx = 0; in build_zonelists_in_node_order()
5930 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; in build_thisnode_zonelists()
5933 zonerefs->zone = NULL; in build_thisnode_zonelists()
5934 zonerefs->zone_idx = 0; in build_thisnode_zonelists()
5951 /* NUMA-aware ordering of nodes */ in build_zonelists()
5952 local_node = pgdat->node_id; in build_zonelists()
5961 * distance group to make it round-robin. in build_zonelists()
5969 load--; in build_zonelists()
5990 return zone_to_nid(z->zone); in local_memory_node()
6004 local_node = pgdat->node_id; in build_zonelists()
6006 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; in build_zonelists()
6031 zonerefs->zone = NULL; in build_zonelists()
6032 zonerefs->zone_idx = 0; in build_zonelists()
6071 * seqlock to prevent any printk() from trying to hold port->lock, for in __build_all_zonelists()
6073 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. in __build_all_zonelists()
6084 * building zonelists is fine - no need to touch other nodes. in __build_all_zonelists()
6086 if (self && !node_online(self->node_id)) { in __build_all_zonelists()
6097 * We now know the "local memory node" for each node-- in __build_all_zonelists()
6099 * Set up numa_mem percpu variable for on-line cpus. During in __build_all_zonelists()
6100 * boot, only the boot cpu should be on-line; we'll init the in __build_all_zonelists()
6101 * secondary cpus' numa_mem as they come on-line. During in __build_all_zonelists()
6102 * node/memory hotplug, we'll fixup all on-line cpus. in __build_all_zonelists()
6132 * (a chicken-egg dilemma). in build_all_zonelists_init()
6162 * more accurate, but expensive to check per-zone. This check is in build_all_zonelists()
6163 * made on memory-hotadd so a system can start with mobility in build_all_zonelists()
6203 * Initially all pages are reserved - free ones are freed
6205 * done. Non-atomic initialization, single-pass.
6219 if (highest_memmap_pfn < end_pfn - 1) in memmap_init_zone()
6220 highest_memmap_pfn = end_pfn - 1; in memmap_init_zone()
6234 if (start_pfn == altmap->base_pfn) in memmap_init_zone()
6235 start_pfn += altmap->reserve; in memmap_init_zone()
6236 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_zone()
6242 * There can be holes in boot-time mem_map[]s handed to this in memmap_init_zone()
6277 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
6281 int nid = pgdat->node_id; in memmap_init_zone_device()
6292 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_zone_device()
6293 nr_pages = end_pfn - start_pfn; in memmap_init_zone_device()
6305 * We can use the non-atomic __set_bit operation for setting in memmap_init_zone_device()
6311 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer in memmap_init_zone_device()
6313 * ever freed or placed on a driver-private list. in memmap_init_zone_device()
6315 page->pgmap = pgmap; in memmap_init_zone_device()
6316 page->zone_device_data = NULL; in memmap_init_zone_device()
6322 * the address space during boot when many long-lived in memmap_init_zone_device()
6335 nr_pages, jiffies_to_msecs(jiffies - start)); in memmap_init_zone_device()
6343 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6344 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6356 * - physical memory bank size is not necessarily the exact multiple of the
6358 * - early reserved memory may not be listed in memblock.memory
6359 * - memory layouts defined with memmap= kernel parameter may not align
6363 * - PG_Reserved is set
6364 * - zone and node links point to zone and node that span the page if the
6366 * - zone and node links point to adjacent zone/node if the hole falls on
6381 + pageblock_nr_pages - 1; in init_unavailable_range()
6406 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
6407 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
6416 memmap_init_zone(end_pfn - start_pfn, nid, zone_id, start_pfn, in memmap_init_zone_range()
6435 struct zone *zone = node->node_zones + j; in memmap_init()
6462 /* A stub for backwards compatibility with custom implementation on IA-64 */
6475 * The per-cpu-pages pools are set to around 1000th of the in zone_batchsize()
6487 * Clamp the batch to a 2^n - 1 value. Having a power in zone_batchsize()
6496 batch = rounddown_pow_of_two(batch + batch/2) - 1; in zone_batchsize()
6512 * fragmented and becoming unavailable for high-order allocations. in zone_batchsize()
6519 * pcp->high and pcp->batch values are related and dependent on one another:
6520 * ->batch must never be higher than ->high.
6524 * Any new users of pcp->batch and pcp->high should ensure they can cope with
6535 pcp->batch = 1; in pageset_update()
6539 pcp->high = high; in pageset_update()
6542 pcp->batch = batch; in pageset_update()
6548 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); in pageset_set_batch()
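Combining this with the zone_batchsize() clamp shown earlier gives concrete numbers; a worked example assuming an initial batch estimate of 32 pages:

/*
 * Worked example (assumed initial estimate of 32 pages):
 *   batch = rounddown_pow_of_two(32 + 32/2) - 1
 *         = rounddown_pow_of_two(48) - 1 = 32 - 1 = 31
 *   pageset_set_batch() then picks high = 6 * 31 = 186,
 *   so the pcp list is trimmed by a batch of 31 pages once it exceeds 186.
 */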
6558 pcp = &p->pcp; in pageset_init()
6560 INIT_LIST_HEAD(&pcp->lists[migratetype]); in pageset_init()
6580 pageset_update(&p->pcp, high, batch); in pageset_set_high()
6596 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
6605 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
6632 memset(pcp->vm_numa_stat_diff, 0, in setup_per_cpu_pageset()
6633 sizeof(pcp->vm_numa_stat_diff)); in setup_per_cpu_pageset()
6638 pgdat->per_cpu_nodestats = in setup_per_cpu_pageset()
6649 zone->pageset = &boot_pageset; in zone_pcp_init()
6653 zone->name, zone->present_pages, in zone_pcp_init()
6661 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
6664 if (zone_idx > pgdat->nr_zones) in init_currently_empty_zone()
6665 pgdat->nr_zones = zone_idx; in init_currently_empty_zone()
6667 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
6670 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
6671 pgdat->node_id, in init_currently_empty_zone()
6676 zone->initialized = 1; in init_currently_empty_zone()
6680 * get_pfn_range_for_nid - Return the start and end page frames for a node
6696 *start_pfn = -1UL; in get_pfn_range_for_nid()
6704 if (*start_pfn == -1UL) in get_pfn_range_for_nid()
6716 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { in find_usable_zone_for_movable()
6725 VM_BUG_ON(zone_index == -1); in find_usable_zone_for_movable()
6768 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6799 return *zone_end_pfn - *zone_start_pfn; in zone_spanned_pages_in_node()
6810 unsigned long nr_absent = range_end_pfn - range_start_pfn; in __absent_pages_in_range()
6817 nr_absent -= end_pfn - start_pfn; in __absent_pages_in_range()
6823 * absent_pages_in_range - Return number of page frames in holes within a range
6875 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
6879 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
6894 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages()
6899 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
6904 absent = zone_absent_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
6909 real_size = size - absent; in calculate_node_totalpages()
6912 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
6914 zone->zone_start_pfn = 0; in calculate_node_totalpages()
6915 zone->spanned_pages = size; in calculate_node_totalpages()
6916 zone->present_pages = real_size; in calculate_node_totalpages()
6922 pgdat->node_spanned_pages = totalpages; in calculate_node_totalpages()
6923 pgdat->node_present_pages = realtotalpages; in calculate_node_totalpages()
6924 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, in calculate_node_totalpages()
6930 * Calculate the size of the zone->blockflags rounded to an unsigned long
6940 zonesize += zone_start_pfn & (pageblock_nr_pages-1); in usemap_size()
6955 zone->pageblock_flags = NULL; in setup_usemap()
6957 zone->pageblock_flags = in setup_usemap()
6959 pgdat->node_id); in setup_usemap()
6960 if (!zone->pageblock_flags) in setup_usemap()
6962 usemapsize, zone->name, pgdat->node_id); in setup_usemap()
6984 order = MAX_ORDER - 1; in set_pageblock_order()
6997 * is unused as pageblock_order is set at compile-time. See
6998 * include/linux/pageblock-flags.h for the values of pageblock_order based on
7030 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; in pgdat_init_split_queue()
7032 spin_lock_init(&ds_queue->split_queue_lock); in pgdat_init_split_queue()
7033 INIT_LIST_HEAD(&ds_queue->split_queue); in pgdat_init_split_queue()
7034 ds_queue->split_queue_len = 0; in pgdat_init_split_queue()
7043 init_waitqueue_head(&pgdat->kcompactd_wait); in pgdat_init_kcompactd()
7056 init_waitqueue_head(&pgdat->kswapd_wait); in pgdat_init_internals()
7057 init_waitqueue_head(&pgdat->pfmemalloc_wait); in pgdat_init_internals()
7059 init_waitqueue_head(&pgdat->zswapd_wait); in pgdat_init_internals()
7063 spin_lock_init(&pgdat->lru_lock); in pgdat_init_internals()
7064 lruvec_init(&pgdat->__lruvec); in pgdat_init_internals()
7066 pgdat->__lruvec.pgdat = pgdat; in pgdat_init_internals()
7073 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
7075 zone->name = zone_names[idx]; in zone_init_internals()
7076 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
7077 spin_lock_init(&zone->lock); in zone_init_internals()
7084 * - init pgdat internals
7085 * - init all zones belonging to this node
7097 zone_init_internals(&pgdat->node_zones[z], z, nid, 0); in free_area_init_core_hotplug()
7103 * - mark all pages reserved
7104 * - mark all memory queues empty
7105 * - clear the memory bitmaps
7113 int nid = pgdat->node_id; in free_area_init_core()
7116 pgdat->per_cpu_nodestats = &boot_nodestats; in free_area_init_core()
7119 struct zone *zone = pgdat->node_zones + j; in free_area_init_core()
7121 unsigned long zone_start_pfn = zone->zone_start_pfn; in free_area_init_core()
7123 size = zone->spanned_pages; in free_area_init_core()
7124 freesize = zone->present_pages; in free_area_init_core()
7129 * and per-cpu initialisations in free_area_init_core()
7134 freesize -= memmap_pages; in free_area_init_core()
7146 freesize -= dma_reserve; in free_area_init_core()
7155 nr_kernel_pages -= memmap_pages; in free_area_init_core()
7182 if (!pgdat->node_spanned_pages) in alloc_node_mem_map()
7185 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); in alloc_node_mem_map()
7186 offset = pgdat->node_start_pfn - start; in alloc_node_mem_map()
7188 if (!pgdat->node_mem_map) { in alloc_node_mem_map()
7199 size = (end - start) * sizeof(struct page); in alloc_node_mem_map()
7201 pgdat->node_id); in alloc_node_mem_map()
7204 size, pgdat->node_id); in alloc_node_mem_map()
7205 pgdat->node_mem_map = map + offset; in alloc_node_mem_map()
7208 __func__, pgdat->node_id, (unsigned long)pgdat, in alloc_node_mem_map()
7209 (unsigned long)pgdat->node_mem_map); in alloc_node_mem_map()
7215 mem_map = NODE_DATA(0)->node_mem_map; in alloc_node_mem_map()
7216 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) in alloc_node_mem_map()
7217 mem_map -= offset; in alloc_node_mem_map()
7228 pgdat->first_deferred_pfn = ULONG_MAX; in pgdat_set_deferred_range()
7241 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); in free_area_init_node()
7245 pgdat->node_id = nid; in free_area_init_node()
7246 pgdat->node_start_pfn = start_pfn; in free_area_init_node()
7247 pgdat->per_cpu_nodestats = NULL; in free_area_init_node()
7249 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, in free_area_init_node()
7251 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); in free_area_init_node()
7279 * node_map_pfn_alignment - determine the maximum internode alignment
7286 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
7290 * This is used to test whether pfn -> nid mapping of the chosen memory
7312 * Start with a mask granular enough to pin-point to the in node_map_pfn_alignment()
7313 * start pfn and tick off bits one-by-one until it becomes in node_map_pfn_alignment()
7316 mask = ~((1 << __ffs(start)) - 1); in node_map_pfn_alignment()
7329 * find_min_pfn_with_active_regions - Find the minimum PFN registered
7351 unsigned long pages = end_pfn - start_pfn; in early_calculate_totalpages()
7391 usable_startpfn = PFN_DOWN(r->base); in find_zone_movable_pfns_for_nodes()
7453 * Round-up so that ZONE_MOVABLE is at least as large as what in find_zone_movable_pfns_for_nodes()
7459 corepages = totalpages - required_movablecore; in find_zone_movable_pfns_for_nodes()
7507 - start_pfn; in find_zone_movable_pfns_for_nodes()
7509 kernelcore_remaining -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
7511 required_kernelcore -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
7531 * start_pfn->end_pfn. Calculate size_pages as the in find_zone_movable_pfns_for_nodes()
7534 size_pages = end_pfn - start_pfn; in find_zone_movable_pfns_for_nodes()
7544 required_kernelcore -= min(required_kernelcore, in find_zone_movable_pfns_for_nodes()
7546 kernelcore_remaining -= size_pages; in find_zone_movable_pfns_for_nodes()
7558 usable_nodes--; in find_zone_movable_pfns_for_nodes()
7585 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { in check_for_memory()
7586 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory()
7607 * free_area_init - Initialise all pg_data_t and zone data
7636 zone = MAX_NR_ZONES - i - 1; in free_area_init()
7659 pr_info(" %-8s ", zone_names[i]); in free_area_init()
7664 pr_cont("[mem %#018Lx-%#018Lx]\n", in free_area_init()
7668 << PAGE_SHIFT) - 1); in free_area_init()
7681 * subsection-map relative to active online memory ranges to in free_area_init()
7682 * enable future "sub-section" extensions of the memory map. in free_area_init()
7686 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, in free_area_init()
7688 ((u64)end_pfn << PAGE_SHIFT) - 1); in free_area_init()
7689 subsection_map_init(start_pfn, end_pfn - start_pfn); in free_area_init()
7700 if (pgdat->node_present_pages) in free_area_init()
7715 return -EINVAL; in cmdline_parse_core()
7766 atomic_long_add(count, &page_zone(page)->managed_pages); in adjust_managed_page_count()
7802 s, pages << (PAGE_SHIFT - 10)); in free_reserved_area()
7812 atomic_long_inc(&page_zone(page)->managed_pages); in free_highmem_page()
7824 codesize = _etext - _stext; in mem_init_print_info()
7825 datasize = _edata - _sdata; in mem_init_print_info()
7826 rosize = __end_rodata - __start_rodata; in mem_init_print_info()
7827 bss_size = __bss_stop - __bss_start; in mem_init_print_info()
7828 init_data_size = __init_end - __init_begin; in mem_init_print_info()
7829 init_code_size = _einittext - _sinittext; in mem_init_print_info()
7841 size -= adj; \ in mem_init_print_info()
7853 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" in mem_init_print_info()
7858 nr_free_pages() << (PAGE_SHIFT - 10), in mem_init_print_info()
7859 physpages << (PAGE_SHIFT - 10), in mem_init_print_info()
7862 (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10), in mem_init_print_info()
7863 totalcma_pages << (PAGE_SHIFT - 10), in mem_init_print_info()
7865 totalhigh_pages() << (PAGE_SHIFT - 10), in mem_init_print_info()
7871 * set_dma_reserve - set the specified number of pages reserved in the first zone
7874 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7879 * smaller per-cpu batchsize.
7940 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
7951 pgdat->totalreserve_pages = 0; in calculate_totalreserve_pages()
7954 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages()
7960 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
7961 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
7970 pgdat->totalreserve_pages += max; in calculate_totalreserve_pages()
7979 * setup_per_zone_lowmem_reserve - called whenever
7990 for (i = 0; i < MAX_NR_ZONES - 1; i++) { in setup_per_zone_lowmem_reserve()
7991 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve()
7997 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
8002 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
8004 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
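So each lower zone withholds managed_pages/ratio of the cumulative memory of the zones above it from allocations that could have been satisfied higher up; a worked example with assumed sizes:

/*
 * Example (assumed sizes): if the zones above ZONE_NORMAL manage a
 * combined 1,048,576 pages and sysctl_lowmem_reserve_ratio for
 * ZONE_NORMAL is 32, then ZONE_NORMAL sets lowmem_reserve to
 * 1,048,576 / 32 = 32,768 pages that highmem-capable allocations
 * may not consume.
 */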
8015 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); in __setup_per_zone_wmarks()
8029 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
8038 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) in __setup_per_zone_wmarks()
8046 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
8052 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
8064 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
8065 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
8066 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; in __setup_per_zone_wmarks()
8068 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
8076 * setup_per_zone_wmarks - called when min_free_kbytes changes
8077 * or when memory is hot-{added|removed}
8149 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so in postcore_initcall()
8191 pgdat->min_unmapped_pages = 0; in setup_min_unmapped_ratio()
8194 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
8219 pgdat->min_slab_pages = 0; in setup_min_slab_ratio()
8222 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8242 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8272 per_cpu_ptr(zone->pageset, cpu)); in __zone_pcp_update()
8276 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8298 ret = -EINVAL; in percpu_pagelist_fraction_sysctl_handler()
8330 * Because 32-bit systems cannot have large physical memory, where this scaling
8341 * - it is assumed that the hash table must contain an exact power-of-2
8343 * - limit is the number of hash buckets, not the total allocation size
8365 numentries -= arch_reserved_kernel_pages(); in alloc_large_system_hash()
8383 numentries >>= (scale - PAGE_SHIFT); in alloc_large_system_hash()
8385 numentries <<= (PAGE_SHIFT - scale); in alloc_large_system_hash()
8387 /* Make sure we've got at least a 0-order allocation.. */ in alloc_large_system_hash()
8429 * If bucketsize is not a power-of-two, we may free in alloc_large_system_hash()
8436 } while (!table && size > PAGE_SIZE && --log2qty); in alloc_large_system_hash()
8442 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, in alloc_large_system_hash()
8448 *_hash_mask = (1 << log2qty) - 1; in alloc_large_system_hash()
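Because the bucket count is an exact power of two, the mask written above lets callers replace a modulo with a bitwise AND. A small self-contained illustration (names hypothetical):

#include <assert.h>

int main(void)
{
	unsigned int log2qty = 10;			/* 1024 buckets */
	unsigned long hash_mask = (1UL << log2qty) - 1;	/* 0x3ff */
	unsigned long hash = 0x12345678UL;

	/* hash & mask == hash % nbuckets for power-of-two bucket counts */
	assert((hash & hash_mask) == hash % (1UL << log2qty));
	return 0;
}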
8458 * check without lock_page also may miss some movable non-lru pages at
8485 for (; iter < pageblock_nr_pages - offset; iter++) { in has_unmovable_pages()
8525 skip_pages = compound_nr(head) - (page - head); in has_unmovable_pages()
8526 iter += skip_pages - 1; in has_unmovable_pages()
8534 * because their page->_refcount is zero at all times. in has_unmovable_pages()
8538 iter += (1 << buddy_order(page)) - 1; in has_unmovable_pages()
8579 pageblock_nr_pages) - 1); in pfn_max_align_down()
8598 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
8604 while (pfn < end || !list_empty(&cc->migratepages)) { in __alloc_contig_migrate_range()
8606 ret = -EINTR; in __alloc_contig_migrate_range()
8610 if (list_empty(&cc->migratepages)) { in __alloc_contig_migrate_range()
8611 cc->nr_migratepages = 0; in __alloc_contig_migrate_range()
8614 ret = -EINTR; in __alloc_contig_migrate_range()
8619 ret = ret < 0 ? ret : -EBUSY; in __alloc_contig_migrate_range()
8623 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
8624 &cc->migratepages); in __alloc_contig_migrate_range()
8625 cc->nr_migratepages -= nr_reclaimed; in __alloc_contig_migrate_range()
8627 ret = migrate_pages(&cc->migratepages, alloc_migration_target, in __alloc_contig_migrate_range()
8628 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE); in __alloc_contig_migrate_range()
8631 putback_movable_pages(&cc->migratepages); in __alloc_contig_migrate_range()
8638 * alloc_contig_range() -- tries to allocate given range of pages
8640 * @end: one-past-the-last PFN to allocate
8667 .order = -1, in alloc_contig_range()
8707 * In case of -EBUSY, we'd like to know which page causes problem. in alloc_contig_range()
8714 * -EBUSY is not accidentally used or returned to caller. in alloc_contig_range()
8717 if (ret && ret != -EBUSY) in alloc_contig_range()
8734 * We don't have to hold zone->lock here because the pages are in alloc_contig_range()
8767 ret = -EBUSY; in alloc_contig_range()
8774 ret = -EBUSY; in alloc_contig_range()
8780 free_contig_range(outer_start, start - outer_start); in alloc_contig_range()
8782 free_contig_range(end, outer_end - end); in alloc_contig_range()
8829 unsigned long last_pfn = start_pfn + nr_pages - 1; in zone_spans_last_pfn()
8835 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
8866 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8868 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
8878 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8883 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8887 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8897 for (; nr_pages--; pfn++) { in free_contig_range()
8926 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
8928 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
8931 free_percpu(zone->pageset); in zone_pcp_reset()
8932 zone->pageset = &boot_pageset; in zone_pcp_reset()
8952 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
8980 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
8991 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
8993 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
8998 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
9005 * Break down a higher-order page into sub-pages, and keep our target out of
9016 high--; in break_down_buddy_pages()
9049 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
9051 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
9063 __mod_zone_freepage_state(zone, -1, migratetype); in take_page_off_buddy()
9070 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()
9081 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; in has_managed_dma()