
Lines Matching full:zone

265  * call this function after deciding the zone to which to
313 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
324 if (zone && zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
334 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
349 if (zone && zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
358 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
361 unsigned long zone_start_pfn = zone->zone_start_pfn; in shrink_zone_span()
362 unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */ in shrink_zone_span()
365 int nid = zone_to_nid(zone); in shrink_zone_span()
367 zone_span_writelock(zone); in shrink_zone_span()
370  * If the section is the smallest section in the zone, we need to in shrink_zone_span()
371  * shrink zone->zone_start_pfn and zone->spanned_pages. in shrink_zone_span()
373  * for shrinking the zone. in shrink_zone_span()
375 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
378 zone->zone_start_pfn = pfn; in shrink_zone_span()
379 zone->spanned_pages = zone_end_pfn - pfn; in shrink_zone_span()
383  * If the section is the biggest section in the zone, we need to in shrink_zone_span()
384  * shrink zone->spanned_pages. in shrink_zone_span()
386  * shrinking the zone. in shrink_zone_span()
388 pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn, in shrink_zone_span()
391 zone->spanned_pages = pfn - zone_start_pfn + 1; in shrink_zone_span()
395  * The section is neither the biggest nor the smallest mem_section in the zone; it in shrink_zone_span()
396  * only creates a hole in the zone. So in this case, we need not in shrink_zone_span()
397  * change the zone. But the zone may now contain only holes, so in shrink_zone_span()
398  * check whether the zone has anything besides holes. in shrink_zone_span()
405 if (page_zone(pfn_to_page(pfn)) != zone) in shrink_zone_span()
413 zone_span_writeunlock(zone); in shrink_zone_span()
417 /* The zone has no valid section */ in shrink_zone_span()
418 zone->zone_start_pfn = 0; in shrink_zone_span()
419 zone->spanned_pages = 0; in shrink_zone_span()
420 zone_span_writeunlock(zone); in shrink_zone_span()
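
Taken together, the shrink_zone_span() hits above outline the span bookkeeping: when the removed range sits at the low end of the zone, zone_start_pfn moves up and spanned_pages is recomputed from the locally saved end pfn; at the high end, only spanned_pages shrinks. The following is a minimal userspace sketch of just that arithmetic. zone_model, shrink_low_end(), and shrink_high_end() are illustrative names, not kernel code; new_start_pfn and last_valid_pfn stand in for what find_smallest_section_pfn()/find_biggest_section_pfn() would return.

    #include <stdio.h>

    /* Simplified stand-in for the two struct zone span fields the hits touch. */
    struct zone_model {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
    };

    static unsigned long zone_end_pfn(const struct zone_model *z)
    {
        return z->zone_start_pfn + z->spanned_pages;
    }

    static void shrink_low_end(struct zone_model *z, unsigned long new_start_pfn)
    {
        unsigned long end = zone_end_pfn(z);   /* save before moving the start */

        z->zone_start_pfn = new_start_pfn;
        z->spanned_pages = end - new_start_pfn;
    }

    static void shrink_high_end(struct zone_model *z, unsigned long last_valid_pfn)
    {
        /* +1 because last_valid_pfn is the last pfn still inside the zone. */
        z->spanned_pages = last_valid_pfn - z->zone_start_pfn + 1;
    }

    int main(void)
    {
        struct zone_model z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x3000 };

        shrink_low_end(&z, 0x2000);   /* the lowest sections went away */
        printf("start=%#lx spanned=%#lx\n", z.zone_start_pfn, z.spanned_pages);

        shrink_high_end(&z, 0x2fff);  /* the highest sections went away */
        printf("start=%#lx spanned=%#lx\n", z.zone_start_pfn, z.spanned_pages);
        return 0;
    }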
426 struct zone *zone; in update_pgdat_span() local
428 for (zone = pgdat->node_zones; in update_pgdat_span()
429 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
430 unsigned long zone_end_pfn = zone->zone_start_pfn + in update_pgdat_span()
431 zone->spanned_pages; in update_pgdat_span()
434 if (!zone->spanned_pages) in update_pgdat_span()
437 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
444 if (zone->zone_start_pfn < node_start_pfn) in update_pgdat_span()
445 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
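
The update_pgdat_span() hits recompute the node's span as the union of its non-empty zone spans: the minimum zone_start_pfn and the maximum zone end pfn. A self-contained model of that loop follows; it assumes, as the fragments suggest, that node_end_pfn == 0 can double as the "no non-empty zone seen yet" marker.

    #include <stdio.h>

    #define MAX_NR_ZONES 4

    struct zone_model { unsigned long zone_start_pfn, spanned_pages; };

    struct pgdat_model {
        struct zone_model node_zones[MAX_NR_ZONES];
        unsigned long node_start_pfn, node_spanned_pages;
    };

    /* Model of update_pgdat_span(): node span = union of non-empty zone spans. */
    static void update_pgdat_span(struct pgdat_model *pgdat)
    {
        unsigned long node_start_pfn = 0, node_end_pfn = 0;
        struct zone_model *zone;

        for (zone = pgdat->node_zones;
             zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
            unsigned long zone_end_pfn = zone->zone_start_pfn +
                                         zone->spanned_pages;

            if (!zone->spanned_pages)        /* empty zones don't count */
                continue;
            if (!node_end_pfn) {             /* first non-empty zone seen */
                node_start_pfn = zone->zone_start_pfn;
                node_end_pfn = zone_end_pfn;
                continue;
            }
            if (zone_end_pfn > node_end_pfn)
                node_end_pfn = zone_end_pfn;
            if (zone->zone_start_pfn < node_start_pfn)
                node_start_pfn = zone->zone_start_pfn;
        }
        pgdat->node_start_pfn = node_start_pfn;
        pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
    }

    int main(void)
    {
        struct pgdat_model pgdat = { .node_zones = {
            { 0x1000, 0x1000 },   /* spans [0x1000, 0x2000) */
            { 0, 0 },             /* empty, skipped */
            { 0x3000, 0x800 },    /* spans [0x3000, 0x3800) */
            { 0, 0 },
        } };

        update_pgdat_span(&pgdat);
        printf("node start=%#lx spanned=%#lx\n",
               pgdat.node_start_pfn, pgdat.node_spanned_pages);
        return 0;
    }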
452 void __ref remove_pfn_range_from_zone(struct zone *zone, in remove_pfn_range_from_zone() argument
456 struct pglist_data *pgdat = zone->zone_pgdat; in remove_pfn_range_from_zone()
461 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So in remove_pfn_range_from_zone()
465 if (zone_idx(zone) == ZONE_DEVICE) in remove_pfn_range_from_zone()
469 clear_zone_contiguous(zone); in remove_pfn_range_from_zone()
471 pgdat_resize_lock(zone->zone_pgdat, &flags); in remove_pfn_range_from_zone()
472 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); in remove_pfn_range_from_zone()
474 pgdat_resize_unlock(zone->zone_pgdat, &flags); in remove_pfn_range_from_zone()
476 set_zone_contiguous(zone); in remove_pfn_range_from_zone()
615 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_online() argument
617 int nid = zone_to_nid(zone); in node_states_check_changes_online()
633  * if the memory to be onlined is in a zone of 0...zone_last, and in node_states_check_changes_online()
638 if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY)) in node_states_check_changes_online()
657 if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY)) in node_states_check_changes_online()
687 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn, in resize_zone_range() argument
690 unsigned long old_end_pfn = zone_end_pfn(zone); in resize_zone_range()
692 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in resize_zone_range()
693 zone->zone_start_pfn = start_pfn; in resize_zone_range()
695 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; in resize_zone_range()
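
resize_zone_range() is the growth-side counterpart of shrink_zone_span(): pull zone_start_pfn down if the new range starts below the zone (or the zone is empty), then recompute spanned_pages against whichever end pfn is larger. Below is a runnable model of just that arithmetic; zone_model is an illustrative type, and zone_is_empty() is approximated by a zero span.

    #include <stdio.h>

    struct zone_model { unsigned long zone_start_pfn, spanned_pages; };

    static unsigned long zone_end_pfn(const struct zone_model *z)
    {
        return z->zone_start_pfn + z->spanned_pages;
    }

    /* Model of resize_zone_range(): widen the span to cover the added range. */
    static void resize_zone_range(struct zone_model *zone, unsigned long start_pfn,
                                  unsigned long nr_pages)
    {
        unsigned long old_end_pfn = zone_end_pfn(zone);  /* before moving start */
        unsigned long new_end_pfn = start_pfn + nr_pages;

        if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
            zone->zone_start_pfn = start_pfn;

        if (old_end_pfn > new_end_pfn)
            new_end_pfn = old_end_pfn;                   /* max() of the two ends */
        zone->spanned_pages = new_end_pfn - zone->zone_start_pfn;
    }

    int main(void)
    {
        struct zone_model z = { .zone_start_pfn = 0x2000, .spanned_pages = 0x1000 };

        resize_zone_range(&z, 0x1000, 0x800);  /* hotplug a range below the zone */
        printf("start=%#lx spanned=%#lx\n", z.zone_start_pfn, z.spanned_pages);
        return 0;
    }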
709 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, in move_pfn_range_to_zone() argument
712 struct pglist_data *pgdat = zone->zone_pgdat; in move_pfn_range_to_zone()
716 if (zone_is_empty(zone)) in move_pfn_range_to_zone()
717 init_currently_empty_zone(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
719 clear_zone_contiguous(zone); in move_pfn_range_to_zone()
721 /* TODO: pgdat is taken irqsave while the zone is not; it has been this way historically */ in move_pfn_range_to_zone()
723 zone_span_writelock(zone); in move_pfn_range_to_zone()
724 resize_zone_range(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
725 zone_span_writeunlock(zone); in move_pfn_range_to_zone()
731  * with their zone properly. Not nice, but set_pfnblock_flags_mask in move_pfn_range_to_zone()
732  * expects the zone to span the pfn range. All the pages in the range in move_pfn_range_to_zone()
735 memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, in move_pfn_range_to_zone()
738 set_zone_contiguous(zone); in move_pfn_range_to_zone()
742 * Returns a default kernel memory zone for the given pfn range.
743 * If no kernel zone covers this pfn range it will automatically go
746 static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn, in default_kernel_zone_for_pfn()
753 struct zone *zone = &pgdat->node_zones[zid]; in default_kernel_zone_for_pfn() local
755 if (zone_intersects(zone, start_pfn, nr_pages)) in default_kernel_zone_for_pfn()
756 return zone; in default_kernel_zone_for_pfn()
762 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn, in default_zone_for_pfn()
765 struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn, in default_zone_for_pfn()
767 struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in default_zone_for_pfn()
772 * We inherit the existing zone in a simple case where zones do not in default_zone_for_pfn()
779 * If the range doesn't belong to any zone or two zones overlap in the in default_zone_for_pfn()
780 * given range then we use movable zone only if movable_node is in default_zone_for_pfn()
781 * enabled because we always online to a kernel zone by default. in default_zone_for_pfn()
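
The comments in default_zone_for_pfn() describe a three-way policy: inherit the single zone the range already intersects; otherwise (the range matches no zone, or straddles both) fall back to the default kernel zone, or to ZONE_MOVABLE when movable_node is enabled. A hedged userspace model of that decision follows; pick_zone() and zone_model are illustrative names, while zone_intersects() mirrors the kernel helper of the same name.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified zone: just a name and a pfn span. */
    struct zone_model {
        const char *name;
        unsigned long zone_start_pfn, spanned_pages;
    };

    /* Mirrors the kernel's zone_intersects(): empty zones never intersect. */
    static bool zone_intersects(const struct zone_model *z,
                                unsigned long start_pfn, unsigned long nr_pages)
    {
        if (!z->spanned_pages)
            return false;
        return start_pfn < z->zone_start_pfn + z->spanned_pages &&
               start_pfn + nr_pages > z->zone_start_pfn;
    }

    /*
     * Model of the default_zone_for_pfn() policy: inherit the one zone the
     * range intersects; if it matches neither or both, online to the kernel
     * zone unless movable_node is enabled.
     */
    static const struct zone_model *
    pick_zone(const struct zone_model *kernel_zone,
              const struct zone_model *movable_zone,
              unsigned long start_pfn, unsigned long nr_pages,
              bool movable_node)
    {
        bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
        bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

        if (in_kernel ^ in_movable)
            return in_kernel ? kernel_zone : movable_zone;
        return movable_node ? movable_zone : kernel_zone;
    }

    int main(void)
    {
        struct zone_model normal  = { "Normal",  0x1000, 0x1000 };
        struct zone_model movable = { "Movable", 0x2000, 0x1000 };

        /* Range inside Normal only: inherited. */
        printf("%s\n", pick_zone(&normal, &movable, 0x1800, 0x100, false)->name);
        /* Range in no zone, movable_node enabled: ZONE_MOVABLE wins. */
        printf("%s\n", pick_zone(&normal, &movable, 0x4000, 0x100, true)->name);
        return 0;
    }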
786 struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, in zone_for_pfn_range()
799 * Associates the given pfn range with the given node and the zone appropriate
802 static struct zone * __meminit move_pfn_range(int online_type, int nid, in move_pfn_range()
805 struct zone *zone; in move_pfn_range() local
807 zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages); in move_pfn_range()
808 move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL); in move_pfn_range()
809 return zone; in move_pfn_range()
816 struct zone *zone; in online_pages() local
833 /* associate pfn range with the zone */ in online_pages()
834 zone = move_pfn_range(online_type, nid, pfn, nr_pages); in online_pages()
838 node_states_check_changes_online(nr_pages, zone, &arg); in online_pages()
846  * If this zone is not populated, then it is not in the zonelist. in online_pages()
847 * This means the page allocator ignores this zone. in online_pages()
850 if (!populated_zone(zone)) { in online_pages()
852 setup_zone_pageset(zone); in online_pages()
859 zone_pcp_reset(zone); in online_pages()
863 zone->present_pages += onlined_pages; in online_pages()
865 pgdat_resize_lock(zone->zone_pgdat, &flags); in online_pages()
866 zone->zone_pgdat->node_present_pages += onlined_pages; in online_pages()
867 pgdat_resize_unlock(zone->zone_pgdat, &flags); in online_pages()
874 zone_pcp_update(zone); in online_pages()
898 remove_pfn_range_from_zone(zone, pfn, nr_pages); in online_pages()
906 struct zone *z; in reset_node_present_pages()
948 * The node we allocated has no zone fallback lists. For avoiding in hotadd_new_pgdat()
1194 struct zone *zone; in is_pageblock_removable_nolock() local
1198 * sections which are not zone aware so we might end up outside of in is_pageblock_removable_nolock()
1199 * the zone but still within the section. in is_pageblock_removable_nolock()
1206 zone = page_zone(page); in is_pageblock_removable_nolock()
1208 if (!zone_spans_pfn(zone, pfn)) in is_pageblock_removable_nolock()
1211 return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true); in is_pageblock_removable_nolock()
1234 * Confirm all pages in a range [start, end) belong to the same zone.
1242 struct zone *zone = NULL; in test_pages_in_a_zone() local
1260 /* Check if we got outside of the zone */ in test_pages_in_a_zone()
1261 if (zone && !zone_spans_pfn(zone, pfn + i)) in test_pages_in_a_zone()
1264 if (zone && page_zone(page) != zone) in test_pages_in_a_zone()
1266 if (!zone) in test_pages_in_a_zone()
1268 zone = page_zone(page); in test_pages_in_a_zone()
1273 if (zone) { in test_pages_in_a_zone()
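
test_pages_in_a_zone() walks the pfn range, skips holes, and fails as soon as two valid pages resolve to different zones. A simplified, runnable model of the same walk follows; pfn_to_zone_id() is a made-up stand-in for page_zone(pfn_to_page(pfn)), and the zone_spans_pfn() check is omitted for brevity.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for page_zone(pfn_to_page(pfn)); -1 marks a hole. */
    static int pfn_to_zone_id(unsigned long pfn)
    {
        if (pfn >= 0x1000 && pfn < 0x2000)
            return 0;                     /* pretend ZONE_NORMAL */
        if (pfn >= 0x2000 && pfn < 0x3000)
            return 1;                     /* pretend ZONE_MOVABLE */
        return -1;                        /* hole */
    }

    static bool test_pfns_in_a_zone(unsigned long start, unsigned long end)
    {
        int zone_id = -1;

        for (unsigned long pfn = start; pfn < end; pfn++) {
            int id = pfn_to_zone_id(pfn);

            if (id < 0)
                continue;                 /* skip holes, like the real walk */
            if (zone_id >= 0 && id != zone_id)
                return false;             /* crossed a zone boundary */
            if (zone_id < 0)
                zone_id = id;
        }
        return zone_id >= 0;              /* at least one valid page found */
    }

    int main(void)
    {
        printf("%d\n", test_pfns_in_a_zone(0x1800, 0x1900)); /* 1: one zone  */
        printf("%d\n", test_pfns_in_a_zone(0x1f00, 0x2100)); /* 0: two zones */
        return 0;
    }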
1402 /* Because we don't have a big zone->lock, we should in do_migrate_range()
1487 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_offline() argument
1489 struct pglist_data *pgdat = zone->zone_pgdat; in node_states_check_changes_offline()
1507  * If the memory to be offlined is in a zone of 0...zone_last, in node_states_check_changes_offline()
1514 if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) in node_states_check_changes_offline()
1515 arg->status_change_nid_normal = zone_to_nid(zone); in node_states_check_changes_offline()
1535 if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) in node_states_check_changes_offline()
1536 arg->status_change_nid_high = zone_to_nid(zone); in node_states_check_changes_offline()
1557 arg->status_change_nid = zone_to_nid(zone); in node_states_check_changes_offline()
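
The node_states_check_changes_offline() hits report which node states an offline would clear: if the pages being removed cover at least the node's present pages up to a given zone index, the node loses that memory class (N_NORMAL_MEMORY, N_HIGH_MEMORY, and finally N_MEMORY). A rough model of that threshold test; the helper name and the precomputed present-page sums are assumptions for illustration, with the zone_idx(zone) <= zone_last condition folded into which sum is passed in.

    #include <stdio.h>

    /* Stand-ins for the struct memory_notify fields the hits assign. */
    struct notify_model {
        int status_change_nid_normal;
        int status_change_nid_high;
        int status_change_nid;
    };

    /*
     * Each present_* argument stands for the sum of zone->present_pages over
     * node_zones[0..zone_last] for the corresponding zone_last; -1 means
     * "no state change".
     */
    static void check_changes_offline(unsigned long nr_pages, int nid,
                                      unsigned long present_normal,
                                      unsigned long present_high,
                                      unsigned long present_all,
                                      struct notify_model *arg)
    {
        arg->status_change_nid_normal = -1;
        arg->status_change_nid_high = -1;
        arg->status_change_nid = -1;

        if (nr_pages >= present_normal)
            arg->status_change_nid_normal = nid;   /* loses N_NORMAL_MEMORY */
        if (nr_pages >= present_high)
            arg->status_change_nid_high = nid;     /* loses N_HIGH_MEMORY */
        if (nr_pages >= present_all)
            arg->status_change_nid = nid;          /* loses N_MEMORY entirely */
    }

    int main(void)
    {
        struct notify_model arg;

        /* Offlining 512 pages of a node with 512 normal, 1024 total pages. */
        check_changes_offline(512, 0, 512, 1024, 1024, &arg);
        printf("normal=%d high=%d all=%d\n", arg.status_change_nid_normal,
               arg.status_change_nid_high, arg.status_change_nid);
        return 0;
    }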
1584 struct zone *zone; in __offline_pages() local
1603 zone = page_zone(pfn_to_page(valid_start)); in __offline_pages()
1604 node = zone_to_nid(zone); in __offline_pages()
1617 node_states_check_changes_offline(nr_pages, zone, &arg); in __offline_pages()
1633 drain_all_pages(zone); in __offline_pages()
1660 zone->present_pages -= offlined_pages; in __offline_pages()
1662 pgdat_resize_lock(zone->zone_pgdat, &flags); in __offline_pages()
1663 zone->zone_pgdat->node_present_pages -= offlined_pages; in __offline_pages()
1664 pgdat_resize_unlock(zone->zone_pgdat, &flags); in __offline_pages()
1668 if (!populated_zone(zone)) { in __offline_pages()
1669 zone_pcp_reset(zone); in __offline_pages()
1672 zone_pcp_update(zone); in __offline_pages()
1684 remove_pfn_range_from_zone(zone, start_pfn, nr_pages); in __offline_pages()