• Home
  • Raw
  • Download

Lines Matching full:zone

307  * call this function after deciding the zone to which to
355 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
366 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
376 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
391 if (zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
400 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
404 int nid = zone_to_nid(zone); in shrink_zone_span()
406 zone_span_writelock(zone); in shrink_zone_span()
407 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
  409  * If the section is the smallest section in the zone, we need to in shrink_zone_span()
410 * shrink zone->zone_start_pfn and zone->zone_spanned_pages. in shrink_zone_span()
412 * for shrinking zone. in shrink_zone_span()
414 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
415 zone_end_pfn(zone)); in shrink_zone_span()
417 zone->spanned_pages = zone_end_pfn(zone) - pfn; in shrink_zone_span()
418 zone->zone_start_pfn = pfn; in shrink_zone_span()
420 zone->zone_start_pfn = 0; in shrink_zone_span()
421 zone->spanned_pages = 0; in shrink_zone_span()
423 } else if (zone_end_pfn(zone) == end_pfn) { in shrink_zone_span()
  425  * If the section is the biggest section in the zone, we need to in shrink_zone_span()
426 * shrink zone->spanned_pages. in shrink_zone_span()
428 * shrinking zone. in shrink_zone_span()
430 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn, in shrink_zone_span()
433 zone->spanned_pages = pfn - zone->zone_start_pfn + 1; in shrink_zone_span()
435 zone->zone_start_pfn = 0; in shrink_zone_span()
436 zone->spanned_pages = 0; in shrink_zone_span()
439 zone_span_writeunlock(zone); in shrink_zone_span()
445 struct zone *zone; in update_pgdat_span() local
447 for (zone = pgdat->node_zones; in update_pgdat_span()
448 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
449 unsigned long zone_end_pfn = zone->zone_start_pfn + in update_pgdat_span()
450 zone->spanned_pages; in update_pgdat_span()
453 if (!zone->spanned_pages) in update_pgdat_span()
456 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
463 if (zone->zone_start_pfn < node_start_pfn) in update_pgdat_span()
464 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
471 void __ref remove_pfn_range_from_zone(struct zone *zone, in remove_pfn_range_from_zone() argument
476 struct pglist_data *pgdat = zone->zone_pgdat; in remove_pfn_range_from_zone()
492 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So in remove_pfn_range_from_zone()
496 if (zone_idx(zone) == ZONE_DEVICE) in remove_pfn_range_from_zone()
500 clear_zone_contiguous(zone); in remove_pfn_range_from_zone()
502 pgdat_resize_lock(zone->zone_pgdat, &flags); in remove_pfn_range_from_zone()
503 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); in remove_pfn_range_from_zone()
505 pgdat_resize_unlock(zone->zone_pgdat, &flags); in remove_pfn_range_from_zone()
507 set_zone_contiguous(zone); in remove_pfn_range_from_zone()
620 * zone ("present"). in online_pages_range()
631 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_online() argument
633 int nid = zone_to_nid(zone); in node_states_check_changes_online()
641 if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY)) in node_states_check_changes_online()
644 if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY)) in node_states_check_changes_online()
661 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn, in resize_zone_range() argument
664 unsigned long old_end_pfn = zone_end_pfn(zone); in resize_zone_range()
666 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in resize_zone_range()
667 zone->zone_start_pfn = start_pfn; in resize_zone_range()
669 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; in resize_zone_range()
684 * Associate the pfn range with the given zone, initializing the memmaps
685 * and resizing the pgdat/zone data to span the added pages. After this
690 * zone stats (e.g., nr_isolate_pageblock) are touched.
692 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, in move_pfn_range_to_zone() argument
696 struct pglist_data *pgdat = zone->zone_pgdat; in move_pfn_range_to_zone()
700 clear_zone_contiguous(zone); in move_pfn_range_to_zone()
702 /* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */ in move_pfn_range_to_zone()
704 zone_span_writelock(zone); in move_pfn_range_to_zone()
705 if (zone_is_empty(zone)) in move_pfn_range_to_zone()
706 init_currently_empty_zone(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
707 resize_zone_range(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
708 zone_span_writeunlock(zone); in move_pfn_range_to_zone()
714 * with their zone properly. Not nice but set_pfnblock_flags_mask in move_pfn_range_to_zone()
715 * expects the zone spans the pfn range. All the pages in the range in move_pfn_range_to_zone()
718 memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0, in move_pfn_range_to_zone()
721 set_zone_contiguous(zone); in move_pfn_range_to_zone()
725 * Returns a default kernel memory zone for the given pfn range.
726 * If no kernel zone covers this pfn range it will automatically go
729 static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn, in default_kernel_zone_for_pfn()
736 struct zone *zone = &pgdat->node_zones[zid]; in default_kernel_zone_for_pfn() local
738 if (zone_intersects(zone, start_pfn, nr_pages)) in default_kernel_zone_for_pfn()
739 return zone; in default_kernel_zone_for_pfn()
745 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn, in default_zone_for_pfn()
748 struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn, in default_zone_for_pfn()
750 struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in default_zone_for_pfn()
755 * We inherit the existing zone in a simple case where zones do not in default_zone_for_pfn()
762 * If the range doesn't belong to any zone or two zones overlap in the in default_zone_for_pfn()
763 * given range then we use movable zone only if movable_node is in default_zone_for_pfn()
764 * enabled because we always online to a kernel zone by default. in default_zone_for_pfn()
769 struct zone *zone_for_pfn_range(int online_type, int nid, in zone_for_pfn_range()
785 struct zone *zone; in online_pages() local
797 /* associate pfn range with the zone */ in online_pages()
798 zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages); in online_pages()
799 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE); in online_pages()
803 node_states_check_changes_online(nr_pages, zone, &arg); in online_pages()
814 spin_lock_irqsave(&zone->lock, flags); in online_pages()
815 zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages; in online_pages()
816 spin_unlock_irqrestore(&zone->lock, flags); in online_pages()
819 * If this zone is not populated, then it is not in zonelist. in online_pages()
820 * This means the page allocator ignores this zone. in online_pages()
823 if (!populated_zone(zone)) { in online_pages()
825 setup_zone_pageset(zone); in online_pages()
829 zone->present_pages += nr_pages; in online_pages()
831 pgdat_resize_lock(zone->zone_pgdat, &flags); in online_pages()
832 zone->zone_pgdat->node_present_pages += nr_pages; in online_pages()
833 pgdat_resize_unlock(zone->zone_pgdat, &flags); in online_pages()
838 zone_pcp_update(zone); in online_pages()
846 * zone to make sure the just onlined pages are properly distributed in online_pages()
849 shuffle_zone(zone); in online_pages()
870 remove_pfn_range_from_zone(zone, pfn, nr_pages); in online_pages()
878 struct zone *z; in reset_node_present_pages()
926 * The node we allocated has no zone fallback lists. For avoiding in hotadd_new_pgdat()
1190 * Confirm all pages in a range [start, end) belong to the same zone (skipping
1191 * memory holes). When true, return the zone.
1193 struct zone *test_pages_in_a_zone(unsigned long start_pfn, in test_pages_in_a_zone()
1197 struct zone *zone = NULL; in test_pages_in_a_zone() local
1215 /* Check if we got outside of the zone */ in test_pages_in_a_zone()
1216 if (zone && !zone_spans_pfn(zone, pfn + i)) in test_pages_in_a_zone()
1219 if (zone && page_zone(page) != zone) in test_pages_in_a_zone()
1221 zone = page_zone(page); in test_pages_in_a_zone()
1225 return zone; in test_pages_in_a_zone()
1349 * We have checked that migration range is on a single zone so in do_migrate_range()
1388 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_offline() argument
1390 struct pglist_data *pgdat = zone->zone_pgdat; in node_states_check_changes_offline()
1408 if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) in node_states_check_changes_offline()
1409 arg->status_change_nid_normal = zone_to_nid(zone); in node_states_check_changes_offline()
1416 * If the zone is within the range of [0..ZONE_HIGHMEM), and in node_states_check_changes_offline()
1421 if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages) in node_states_check_changes_offline()
1422 arg->status_change_nid_high = zone_to_nid(zone); in node_states_check_changes_offline()
1438 arg->status_change_nid = zone_to_nid(zone); in node_states_check_changes_offline()
1467 struct zone *zone; in offline_pages() local
1497 zone = test_pages_in_a_zone(start_pfn, end_pfn); in offline_pages()
1498 if (!zone) { in offline_pages()
1503 node = zone_to_nid(zone); in offline_pages()
1516 node_states_check_changes_offline(nr_pages, zone, &arg); in offline_pages()
1577 drain_all_pages(zone); in offline_pages()
1589 spin_lock_irqsave(&zone->lock, flags); in offline_pages()
1590 zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; in offline_pages()
1591 spin_unlock_irqrestore(&zone->lock, flags); in offline_pages()
1595 zone->present_pages -= nr_pages; in offline_pages()
1597 pgdat_resize_lock(zone->zone_pgdat, &flags); in offline_pages()
1598 zone->zone_pgdat->node_present_pages -= nr_pages; in offline_pages()
1599 pgdat_resize_unlock(zone->zone_pgdat, &flags); in offline_pages()
1603 if (!populated_zone(zone)) { in offline_pages()
1604 zone_pcp_reset(zone); in offline_pages()
1607 zone_pcp_update(zone); in offline_pages()
1621 remove_pfn_range_from_zone(zone, start_pfn, nr_pages); in offline_pages()
1812 * Sense the online_type via the zone of the memory block. Offlining in try_offline_memory_block()